autocoder-nano 0.1.25__py3-none-any.whl → 0.1.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -15,6 +15,8 @@ from difflib import SequenceMatcher
15
15
 
16
16
  from autocoder_nano.agent.new.auto_new_project import BuildNewProject
17
17
  from autocoder_nano.helper import show_help
18
+ from autocoder_nano.index.entry import build_index_and_filter_files
19
+ from autocoder_nano.index.index_manager import IndexManager
18
20
  from autocoder_nano.llm_client import AutoLLM
19
21
  from autocoder_nano.version import __version__
20
22
  from autocoder_nano.llm_types import *
@@ -61,8 +63,10 @@ memory = {
61
63
  "current_files": {"files": [], "groups": {}},
62
64
  "conf": {
63
65
  "auto_merge": "editblock",
64
- "current_chat_model": "",
65
- "current_code_model": ""
66
+ # "current_chat_model": "",
67
+ # "current_code_model": "",
68
+ "chat_model": "",
69
+ "code_model": "",
66
70
  },
67
71
  "exclude_dirs": [],
68
72
  "mode": "normal", # 新增mode字段,默认为normal模式
@@ -810,433 +814,8 @@ def symbols_info_to_str(info: SymbolsInfo, symbol_types: List[SymbolType]) -> st
810
814
  return "\n".join(result)
811
815
 
812
816
 
813
- class IndexManager:
814
- def __init__(self, source_codes: List[SourceCode], llm: AutoLLM = None):
815
- self.args = args
816
- self.sources = source_codes
817
- self.source_dir = args.source_dir
818
- self.index_dir = os.path.join(self.source_dir, ".auto-coder")
819
- self.index_file = os.path.join(self.index_dir, "index.json")
820
- self.llm = llm
821
- self.llm.setup_default_model_name(memory["conf"]["current_chat_model"])
822
- self.max_input_length = args.model_max_input_length # 模型输入最大长度
823
- # 使用 time.sleep(self.anti_quota_limit) 防止超过 API 频率限制
824
- self.anti_quota_limit = args.anti_quota_limit
825
- # 如果索引目录不存在,则创建它
826
- if not os.path.exists(self.index_dir):
827
- os.makedirs(self.index_dir)
828
-
829
- def build_index(self):
830
- """ 构建或更新索引,使用多线程处理多个文件,并将更新后的索引数据写入文件 """
831
- if os.path.exists(self.index_file):
832
- with open(self.index_file, "r") as file: # 读缓存
833
- index_data = json.load(file)
834
- else: # 首次 build index
835
- logger.info("首次生成索引.")
836
- index_data = {}
837
-
838
- @prompt()
839
- def error_message(source_dir: str, file_path: str):
840
- """
841
- The source_dir is different from the path in index file (e.g. file_path:{{ file_path }} source_dir:{{
842
- source_dir }}). You may need to replace the prefix with the source_dir in the index file or Just delete
843
- the index file to rebuild it.
844
- """
845
-
846
- for item in index_data.keys():
847
- if not item.startswith(self.source_dir):
848
- logger.warning(error_message(source_dir=self.source_dir, file_path=item))
849
- break
850
-
851
- updated_sources = []
852
- wait_to_build_files = []
853
- for source in self.sources:
854
- source_code = source.source_code
855
- md5 = hashlib.md5(source_code.encode("utf-8")).hexdigest()
856
- if source.module_name not in index_data or index_data[source.module_name]["md5"] != md5:
857
- wait_to_build_files.append(source)
858
- counter = 0
859
- num_files = len(wait_to_build_files)
860
- total_files = len(self.sources)
861
- logger.info(f"总文件数: {total_files}, 需要索引文件数: {num_files}")
862
-
863
- for source in wait_to_build_files:
864
- build_result = self.build_index_for_single_source(source)
865
- if build_result is not None:
866
- counter += 1
867
- logger.info(f"正在构建索引:{counter}/{num_files}...")
868
- module_name = build_result["module_name"]
869
- index_data[module_name] = build_result
870
- updated_sources.append(module_name)
871
- if updated_sources:
872
- with open(self.index_file, "w") as fp:
873
- json_str = json.dumps(index_data, indent=2, ensure_ascii=False)
874
- fp.write(json_str)
875
- return index_data
876
-
877
- def split_text_into_chunks(self, text):
878
- """ 文本分块,将大文本分割成适合 LLM 处理的小块 """
879
- lines = text.split("\n")
880
- chunks = []
881
- current_chunk = []
882
- current_length = 0
883
- for line in lines:
884
- if current_length + len(line) + 1 <= self.max_input_length:
885
- current_chunk.append(line)
886
- current_length += len(line) + 1
887
- else:
888
- chunks.append("\n".join(current_chunk))
889
- current_chunk = [line]
890
- current_length = len(line) + 1
891
- if current_chunk:
892
- chunks.append("\n".join(current_chunk))
893
- return chunks
894
-
895
- @prompt()
896
- def get_all_file_symbols(self, path: str, code: str) -> str:
897
- """
898
- 你的目标是从给定的代码中获取代码里的符号,需要获取的符号类型包括:
899
-
900
- 1. 函数
901
- 2. 类
902
- 3. 变量
903
- 4. 所有导入语句
904
-
905
- 如果没有任何符号,返回空字符串就行。
906
- 如果有符号,按如下格式返回:
907
-
908
- ```
909
- {符号类型}: {符号名称}, {符号名称}, ...
910
- ```
911
-
912
- 注意:
913
- 1. 直接输出结果,不要尝试使用任何代码
914
- 2. 不要分析代码的内容和目的
915
- 3. 用途的长度不能超过100字符
916
- 4. 导入语句的分隔符为^^
917
-
918
- 下面是一段示例:
919
-
920
- ## 输入
921
- 下列是文件 /test.py 的源码:
922
-
923
- import os
924
- import time
925
- from loguru import logger
926
- import byzerllm
927
-
928
- a = ""
929
-
930
- @byzerllm.prompt(render="jinja")
931
- def auto_implement_function_template(instruction:str, content:str)->str:
932
-
933
- ## 输出
934
- 用途:主要用于提供自动实现函数模板的功能。
935
- 函数:auto_implement_function_template
936
- 变量:a
937
- 类:
938
- 导入语句:import os^^import time^^from loguru import logger^^import byzerllm
939
-
940
- 现在,让我们开始一个新的任务:
941
-
942
- ## 输入
943
- 下列是文件 {{ path }} 的源码:
944
-
945
- {{ code }}
946
-
947
- ## 输出
948
- """
949
-
950
- def build_index_for_single_source(self, source: SourceCode):
951
- """ 处理单个源文件,提取符号信息并存储元数据 """
952
- file_path = source.module_name
953
- if not os.path.exists(file_path): # 过滤不存在的文件
954
- return None
955
-
956
- ext = os.path.splitext(file_path)[1].lower()
957
- if ext in [".md", ".html", ".txt", ".doc", ".pdf"]: # 过滤文档文件
958
- return None
959
-
960
- if source.source_code.strip() == "":
961
- return None
962
-
963
- md5 = hashlib.md5(source.source_code.encode("utf-8")).hexdigest()
964
-
965
- try:
966
- start_time = time.monotonic()
967
- source_code = source.source_code
968
- if len(source.source_code) > self.max_input_length:
969
- logger.warning(
970
- f"警告[构建索引]: 源代码({source.module_name})长度过长 "
971
- f"({len(source.source_code)}) > 模型最大输入长度({self.max_input_length}),"
972
- f"正在分割为多个块..."
973
- )
974
- chunks = self.split_text_into_chunks(source_code)
975
- symbols_list = []
976
- for chunk in chunks:
977
- chunk_symbols = self.get_all_file_symbols.with_llm(self.llm).run(source.module_name, chunk)
978
- time.sleep(self.anti_quota_limit)
979
- symbols_list.append(chunk_symbols.output)
980
- symbols = "\n".join(symbols_list)
981
- else:
982
- single_symbols = self.get_all_file_symbols.with_llm(self.llm).run(source.module_name, source_code)
983
- symbols = single_symbols.output
984
- time.sleep(self.anti_quota_limit)
985
-
986
- logger.info(f"解析并更新索引:文件 {file_path}(MD5: {md5}),耗时 {time.monotonic() - start_time:.2f} 秒")
987
- except Exception as e:
988
- logger.warning(f"源文件 {file_path} 处理失败: {e}")
989
- return None
990
-
991
- return {
992
- "module_name": source.module_name,
993
- "symbols": symbols,
994
- "last_modified": os.path.getmtime(file_path),
995
- "md5": md5,
996
- }
997
-
998
- @prompt()
999
- def _get_target_files_by_query(self, indices: str, query: str) -> str:
1000
- """
1001
- 下面是已知文件以及对应的符号信息:
1002
-
1003
- {{ indices }}
1004
-
1005
- 用户的问题是:
1006
-
1007
- {{ query }}
1008
-
1009
- 现在,请根据用户的问题以及前面的文件和符号信息,寻找相关文件路径。返回结果按如下格式:
1010
-
1011
- ```json
1012
- {
1013
- "file_list": [
1014
- {
1015
- "file_path": "path/to/file.py",
1016
- "reason": "The reason why the file is the target file"
1017
- },
1018
- {
1019
- "file_path": "path/to/file.py",
1020
- "reason": "The reason why the file is the target file"
1021
- }
1022
- ]
1023
- }
1024
- ```
1025
-
1026
- 如果没有找到,返回如下 json 即可:
1027
-
1028
- ```json
1029
- {"file_list": []}
1030
- ```
1031
-
1032
- 请严格遵循以下步骤:
1033
-
1034
- 1. 识别特殊标记:
1035
- - 查找query中的 `@` 符号,它后面的内容是用户关注的文件路径。
1036
- - 查找query中的 `@@` 符号,它后面的内容是用户关注的符号(如函数名、类名、变量名)。
1037
-
1038
- 2. 匹配文件路径:
1039
- - 对于 `@` 标记,在indices中查找包含该路径的所有文件。
1040
- - 路径匹配应该是部分匹配,因为用户可能只提供了路径的一部分。
1041
-
1042
- 3. 匹配符号:
1043
- - 对于 `@@` 标记,在indices中所有文件的符号信息中查找该符号。
1044
- - 检查函数、类、变量等所有符号类型。
1045
-
1046
- 4. 分析依赖关系:
1047
- - 利用 "导入语句" 信息确定文件间的依赖关系。
1048
- - 如果找到了相关文件,也包括与之直接相关的依赖文件。
1049
-
1050
- 5. 考虑文件用途:
1051
- - 使用每个文件的 "用途" 信息来判断其与查询的相关性。
1052
-
1053
- 6. 请严格按格式要求返回结果,无需额外的说明
1054
-
1055
- 请确保结果的准确性和完整性,包括所有可能相关的文件。
1056
- """
1057
-
1058
- def read_index(self) -> List[IndexItem]:
1059
- """ 读取并解析索引文件,将其转换为 IndexItem 对象列表 """
1060
- if not os.path.exists(self.index_file):
1061
- return []
1062
-
1063
- with open(self.index_file, "r") as file:
1064
- index_data = json.load(file)
1065
-
1066
- index_items = []
1067
- for module_name, data in index_data.items():
1068
- index_item = IndexItem(
1069
- module_name=module_name,
1070
- symbols=data["symbols"],
1071
- last_modified=data["last_modified"],
1072
- md5=data["md5"]
1073
- )
1074
- index_items.append(index_item)
1075
-
1076
- return index_items
1077
-
1078
- def _get_meta_str(self, includes: Optional[List[SymbolType]] = None):
1079
- index_items = self.read_index()
1080
- current_chunk = []
1081
- for item in index_items:
1082
- symbols_str = item.symbols
1083
- if includes:
1084
- symbol_info = extract_symbols(symbols_str)
1085
- symbols_str = symbols_info_to_str(symbol_info, includes)
1086
-
1087
- item_str = f"##{item.module_name}\n{symbols_str}\n\n"
1088
- if len(current_chunk) > self.args.filter_batch_size:
1089
- yield "".join(current_chunk)
1090
- current_chunk = [item_str]
1091
- else:
1092
- current_chunk.append(item_str)
1093
- if current_chunk:
1094
- yield "".join(current_chunk)
1095
-
1096
- def get_target_files_by_query(self, query: str):
1097
- """ 根据查询条件查找相关文件,考虑不同过滤级别 """
1098
- all_results = []
1099
- completed = 0
1100
- total = 0
1101
-
1102
- includes = None
1103
- if self.args.index_filter_level == 0:
1104
- includes = [SymbolType.USAGE]
1105
- if self.args.index_filter_level >= 1:
1106
- includes = None
1107
-
1108
- for chunk in self._get_meta_str(includes=includes):
1109
- result = self._get_target_files_by_query.with_llm(self.llm).with_return_type(FileList).run(chunk, query)
1110
- if result is not None:
1111
- all_results.extend(result.file_list)
1112
- completed += 1
1113
- else:
1114
- logger.warning(f"无法找到分块的目标文件。原因可能是模型响应未返回 JSON 格式数据,或返回的 JSON 为空。")
1115
- total += 1
1116
- time.sleep(self.anti_quota_limit)
1117
-
1118
- logger.info(f"已完成 {completed}/{total} 个分块(基于查询条件)")
1119
- all_results = list({file.file_path: file for file in all_results}.values())
1120
- if self.args.index_filter_file_num > 0:
1121
- limited_results = all_results[: self.args.index_filter_file_num]
1122
- return FileList(file_list=limited_results)
1123
- return FileList(file_list=all_results)
1124
-
1125
- @prompt()
1126
- def _get_related_files(self, indices: str, file_paths: str) -> str:
1127
- """
1128
- 下面是所有文件以及对应的符号信息:
1129
-
1130
- {{ indices }}
1131
-
1132
- 请参考上面的信息,找到被下列文件使用或者引用到的文件列表:
1133
-
1134
- {{ file_paths }}
1135
-
1136
- 请按如下格式进行输出:
1137
-
1138
- ```json
1139
- {
1140
- "file_list": [
1141
- {
1142
- "file_path": "path/to/file.py",
1143
- "reason": "The reason why the file is the target file"
1144
- },
1145
- {
1146
- "file_path": "path/to/file.py",
1147
- "reason": "The reason why the file is the target file"
1148
- }
1149
- ]
1150
- }
1151
- ```
1152
-
1153
- 如果没有相关的文件,输出如下 json 即可:
1154
-
1155
- ```json
1156
- {"file_list": []}
1157
- ```
1158
-
1159
- 注意,
1160
- 1. 找到的文件名必须出现在上面的文件列表中
1161
- 2. 原因控制在20字以内, 且使用中文
1162
- 3. 请严格按格式要求返回结果,无需额外的说明
1163
- """
1164
-
1165
- def get_related_files(self, file_paths: List[str]):
1166
- """ 根据文件路径查询相关文件 """
1167
- all_results = []
1168
-
1169
- completed = 0
1170
- total = 0
1171
-
1172
- for chunk in self._get_meta_str():
1173
- result = self._get_related_files.with_llm(self.llm).with_return_type(
1174
- FileList).run(chunk, "\n".join(file_paths))
1175
- if result is not None:
1176
- all_results.extend(result.file_list)
1177
- completed += 1
1178
- else:
1179
- logger.warning(f"无法找到与分块相关的文件。原因可能是模型限制或查询条件与文件不匹配。")
1180
- total += 1
1181
- time.sleep(self.anti_quota_limit)
1182
- logger.info(f"已完成 {completed}/{total} 个分块(基于相关文件)")
1183
- all_results = list({file.file_path: file for file in all_results}.values())
1184
- return FileList(file_list=all_results)
1185
-
1186
- @prompt()
1187
- def verify_file_relevance(self, file_content: str, query: str) -> str:
1188
- """
1189
- 请验证下面的文件内容是否与用户问题相关:
1190
-
1191
- 文件内容:
1192
- {{ file_content }}
1193
-
1194
- 用户问题:
1195
- {{ query }}
1196
-
1197
- 相关是指,需要依赖这个文件提供上下文,或者需要修改这个文件才能解决用户的问题。
1198
- 请给出相应的可能性分数:0-10,并结合用户问题,理由控制在50字以内,并且使用中文。
1199
- 请严格按格式要求返回结果。
1200
- 格式如下:
1201
-
1202
- ```json
1203
- {
1204
- "relevant_score": 0-10,
1205
- "reason": "这是相关的原因..."
1206
- }
1207
- ```
1208
- """
1209
-
1210
-
1211
817
  def index_command(llm):
1212
- conf = memory.get("conf", {})
1213
- # 默认 chat 配置
1214
- yaml_config = {
1215
- "include_file": ["./base/base.yml"],
1216
- "include_project_structure": conf.get("include_project_structure", "true") in ["true", "True"],
1217
- "human_as_model": conf.get("human_as_model", "false") == "true",
1218
- "skip_build_index": conf.get("skip_build_index", "true") == "true",
1219
- "skip_confirm": conf.get("skip_confirm", "true") == "true",
1220
- "silence": conf.get("silence", "true") == "true",
1221
- "query": ""
1222
- }
1223
- current_files = memory["current_files"]["files"] # get_llm_friendly_package_docs
1224
- yaml_config["urls"] = current_files
1225
- yaml_config["query"] = ""
1226
-
1227
- # 如果 conf 中有设置, 则以 conf 配置为主
1228
- for key, value in conf.items():
1229
- converted_value = convert_config_value(key, value)
1230
- if converted_value is not None:
1231
- yaml_config[key] = converted_value
1232
-
1233
- yaml_content = convert_yaml_config_to_str(yaml_config=yaml_config)
1234
- execute_file = os.path.join(args.source_dir, "actions", f"{uuid.uuid4()}.yml")
1235
-
1236
- with open(os.path.join(execute_file), "w") as f: # 保存此次查询的细节
1237
- f.write(yaml_content)
1238
-
1239
- convert_yaml_to_config(execute_file) # 更新到args
818
+ update_config_to_args(query="", delete_execute_file=True)
1240
819
 
1241
820
  source_dir = os.path.abspath(args.source_dir)
1242
821
  logger.info(f"开始对目录 {source_dir} 中的源代码进行索引")
@@ -1246,7 +825,7 @@ def index_command(llm):
1246
825
  pp = SuffixProject(llm=llm, args=args)
1247
826
  pp.run()
1248
827
  _sources = pp.sources
1249
- index_manager = IndexManager(source_codes=_sources, llm=llm)
828
+ index_manager = IndexManager(args=args, source_codes=_sources, llm=llm)
1250
829
  index_manager.build_index()
1251
830
 
1252
831
 
@@ -1332,34 +911,7 @@ def wrap_text_in_table(data, max_width=60):
1332
911
 
1333
912
 
1334
913
  def index_query_command(query: str, llm: AutoLLM):
1335
- conf = memory.get("conf", {})
1336
- # 默认 chat 配置
1337
- yaml_config = {
1338
- "include_file": ["./base/base.yml"],
1339
- "include_project_structure": conf.get("include_project_structure", "true") in ["true", "True"],
1340
- "human_as_model": conf.get("human_as_model", "false") == "true",
1341
- "skip_build_index": conf.get("skip_build_index", "true") == "true",
1342
- "skip_confirm": conf.get("skip_confirm", "true") == "true",
1343
- "silence": conf.get("silence", "true") == "true",
1344
- "query": query
1345
- }
1346
- current_files = memory["current_files"]["files"] # get_llm_friendly_package_docs
1347
- yaml_config["urls"] = current_files
1348
- yaml_config["query"] = query
1349
-
1350
- # 如果 conf 中有设置, 则以 conf 配置为主
1351
- for key, value in conf.items():
1352
- converted_value = convert_config_value(key, value)
1353
- if converted_value is not None:
1354
- yaml_config[key] = converted_value
1355
-
1356
- yaml_content = convert_yaml_config_to_str(yaml_config=yaml_config)
1357
- execute_file = os.path.join(args.source_dir, "actions", f"{uuid.uuid4()}.yml")
1358
-
1359
- with open(os.path.join(execute_file), "w") as f: # 保存此次查询的细节
1360
- f.write(yaml_content)
1361
-
1362
- convert_yaml_to_config(execute_file) # 更新到args
914
+ update_config_to_args(query=query, delete_execute_file=True)
1363
915
 
1364
916
  # args.query = query
1365
917
  if args.project_type == "py":
@@ -1370,7 +922,7 @@ def index_query_command(query: str, llm: AutoLLM):
1370
922
  _sources = pp.sources
1371
923
 
1372
924
  final_files = []
1373
- index_manager = IndexManager(source_codes=_sources, llm=llm)
925
+ index_manager = IndexManager(args=args, source_codes=_sources, llm=llm)
1374
926
  target_files = index_manager.get_target_files_by_query(query)
1375
927
 
1376
928
  if target_files:
@@ -1397,159 +949,6 @@ def index_query_command(query: str, llm: AutoLLM):
1397
949
  return
1398
950
 
1399
951
 
1400
- def build_index_and_filter_files(llm, sources: List[SourceCode]) -> str:
1401
- def get_file_path(_file_path):
1402
- if _file_path.startswith("##"):
1403
- return _file_path.strip()[2:]
1404
- return _file_path
1405
-
1406
- final_files: Dict[str, TargetFile] = {}
1407
- logger.info("第一阶段:处理 REST/RAG/Search 资源...")
1408
- for source in sources:
1409
- if source.tag in ["REST", "RAG", "SEARCH"]:
1410
- final_files[get_file_path(source.module_name)] = TargetFile(
1411
- file_path=source.module_name, reason="Rest/Rag/Search"
1412
- )
1413
-
1414
- if not args.skip_build_index and llm:
1415
- logger.info("第二阶段:为所有文件构建索引...")
1416
- index_manager = IndexManager(llm=llm, source_codes=sources)
1417
- index_data = index_manager.build_index()
1418
- indexed_files_count = len(index_data) if index_data else 0
1419
- logger.info(f"总索引文件数: {indexed_files_count}")
1420
-
1421
- if not args.skip_filter_index and args.index_filter_level >= 1:
1422
- logger.info("第三阶段:执行 Level 1 过滤(基于查询) ...")
1423
- target_files = index_manager.get_target_files_by_query(args.query)
1424
- if target_files:
1425
- for file in target_files.file_list:
1426
- file_path = file.file_path.strip()
1427
- final_files[get_file_path(file_path)] = file
1428
-
1429
- if target_files is not None and args.index_filter_level >= 2:
1430
- logger.info("第四阶段:执行 Level 2 过滤(基于相关文件)...")
1431
- related_files = index_manager.get_related_files(
1432
- [file.file_path for file in target_files.file_list]
1433
- )
1434
- if related_files is not None:
1435
- for file in related_files.file_list:
1436
- file_path = file.file_path.strip()
1437
- final_files[get_file_path(file_path)] = file
1438
-
1439
- # 如果 Level 1 filtering 和 Level 2 filtering 都未获取路径,则使用全部文件
1440
- if not final_files:
1441
- logger.warning("Level 1, Level 2 过滤未找到相关文件, 将使用所有文件 ...")
1442
- for source in sources:
1443
- final_files[get_file_path(source.module_name)] = TargetFile(
1444
- file_path=source.module_name,
1445
- reason="No related files found, use all files",
1446
- )
1447
-
1448
- logger.info("第五阶段:执行相关性验证 ...")
1449
- verified_files = {}
1450
- temp_files = list(final_files.values())
1451
- verification_results = []
1452
-
1453
- def _print_verification_results(results):
1454
- table = Table(title="文件相关性验证结果", expand=True, show_lines=True)
1455
- table.add_column("文件路径", style="cyan", no_wrap=True)
1456
- table.add_column("得分", justify="right", style="green")
1457
- table.add_column("状态", style="yellow")
1458
- table.add_column("原因/错误")
1459
- if result:
1460
- for _file_path, _score, _status, _reason in results:
1461
- table.add_row(_file_path,
1462
- str(_score) if _score is not None else "N/A", _status, _reason)
1463
- console.print(table)
1464
-
1465
- def _verify_single_file(single_file: TargetFile):
1466
- for _source in sources:
1467
- if _source.module_name == single_file.file_path:
1468
- file_content = _source.source_code
1469
- try:
1470
- _result = index_manager.verify_file_relevance.with_llm(llm).with_return_type(
1471
- VerifyFileRelevance).run(
1472
- file_content=file_content,
1473
- query=args.query
1474
- )
1475
- if _result.relevant_score >= args.verify_file_relevance_score:
1476
- verified_files[single_file.file_path] = TargetFile(
1477
- file_path=single_file.file_path,
1478
- reason=f"Score:{_result.relevant_score}, {_result.reason}"
1479
- )
1480
- return single_file.file_path, _result.relevant_score, "PASS", _result.reason
1481
- else:
1482
- return single_file.file_path, _result.relevant_score, "FAIL", _result.reason
1483
- except Exception as e:
1484
- error_msg = str(e)
1485
- verified_files[single_file.file_path] = TargetFile(
1486
- file_path=single_file.file_path,
1487
- reason=f"Verification failed: {error_msg}"
1488
- )
1489
- return single_file.file_path, None, "ERROR", error_msg
1490
- return
1491
-
1492
- for pending_verify_file in temp_files:
1493
- result = _verify_single_file(pending_verify_file)
1494
- if result:
1495
- verification_results.append(result)
1496
- time.sleep(args.anti_quota_limit)
1497
-
1498
- _print_verification_results(verification_results)
1499
- # Keep all files, not just verified ones
1500
- final_files = verified_files
1501
-
1502
- logger.info("第六阶段:筛选文件并应用限制条件 ...")
1503
- if args.index_filter_file_num > 0:
1504
- logger.info(f"从 {len(final_files)} 个文件中获取前 {args.index_filter_file_num} 个文件(Limit)")
1505
- final_filenames = [file.file_path for file in final_files.values()]
1506
- if not final_filenames:
1507
- logger.warning("未找到目标文件,你可能需要重新编写查询并重试.")
1508
- if args.index_filter_file_num > 0:
1509
- final_filenames = final_filenames[: args.index_filter_file_num]
1510
-
1511
- def _shorten_path(path: str, keep_levels: int = 3) -> str:
1512
- """
1513
- 优化长路径显示,保留最后指定层级
1514
- 示例:/a/b/c/d/e/f.py -> .../c/d/e/f.py
1515
- """
1516
- parts = path.split(os.sep)
1517
- if len(parts) > keep_levels:
1518
- return ".../" + os.sep.join(parts[-keep_levels:])
1519
- return path
1520
-
1521
- def _print_selected(data):
1522
- table = Table(title="代码上下文文件", expand=True, show_lines=True)
1523
- table.add_column("文件路径", style="cyan")
1524
- table.add_column("原因", style="cyan")
1525
- for _file, _reason in data:
1526
- # 路径截取优化:保留最后 3 级路径
1527
- _processed_path = _shorten_path(_file, keep_levels=3)
1528
- table.add_row(_processed_path, _reason)
1529
- console.print(table)
1530
-
1531
- logger.info("第七阶段:准备最终输出 ...")
1532
- _print_selected(
1533
- [
1534
- (file.file_path, file.reason)
1535
- for file in final_files.values()
1536
- if file.file_path in final_filenames
1537
- ]
1538
- )
1539
- result_source_code = ""
1540
- depulicated_sources = set()
1541
-
1542
- for file in sources:
1543
- if file.module_name in final_filenames:
1544
- if file.module_name in depulicated_sources:
1545
- continue
1546
- depulicated_sources.add(file.module_name)
1547
- result_source_code += f"##File: {file.module_name}\n"
1548
- result_source_code += f"{file.source_code}\n\n"
1549
-
1550
- return result_source_code
1551
-
1552
-
1553
952
  def convert_yaml_config_to_str(yaml_config):
1554
953
  yaml_content = yaml.safe_dump(
1555
954
  yaml_config,
@@ -1598,6 +997,41 @@ def convert_config_value(key, value):
1598
997
  return None
1599
998
 
1600
999
 
1000
+ def update_config_to_args(query, delete_execute_file: bool = False):
1001
+ conf = memory.get("conf", {})
1002
+
1003
+ # 默认 chat 配置
1004
+ yaml_config = {
1005
+ "include_file": ["./base/base.yml"],
1006
+ "skip_build_index": conf.get("skip_build_index", "true") == "true",
1007
+ "skip_confirm": conf.get("skip_confirm", "true") == "true",
1008
+ "chat_model": conf.get("chat_model", ""),
1009
+ "code_model": conf.get("code_model", ""),
1010
+ "auto_merge": conf.get("auto_merge", "editblock")
1011
+ }
1012
+ current_files = memory["current_files"]["files"]
1013
+ yaml_config["urls"] = current_files
1014
+ yaml_config["query"] = query
1015
+
1016
+ # 如果 conf 中有设置, 则以 conf 配置为主
1017
+ for key, value in conf.items():
1018
+ converted_value = convert_config_value(key, value)
1019
+ if converted_value is not None:
1020
+ yaml_config[key] = converted_value
1021
+
1022
+ yaml_content = convert_yaml_config_to_str(yaml_config=yaml_config)
1023
+ execute_file = os.path.join(args.source_dir, "actions", f"{uuid.uuid4()}.yml")
1024
+
1025
+ with open(os.path.join(execute_file), "w") as f: # 保存此次查询的细节
1026
+ f.write(yaml_content)
1027
+
1028
+ convert_yaml_to_config(execute_file) # 更新到args
1029
+
1030
+ if delete_execute_file:
1031
+ if os.path.exists(execute_file):
1032
+ os.remove(execute_file)
1033
+
1034
+
1601
1035
  def print_chat_history(history, max_entries=5):
1602
1036
  recent_history = history[-max_entries:]
1603
1037
  table = Table(show_header=False, padding=(0, 1), expand=True, show_lines=True)
@@ -1638,6 +1072,8 @@ def code_review(query: str) -> str:
1638
1072
 
1639
1073
 
1640
1074
  def chat(query: str, llm: AutoLLM):
1075
+ update_config_to_args(query)
1076
+
1641
1077
  is_history = query.strip().startswith("/history")
1642
1078
  is_new = "/new" in query
1643
1079
  if is_new:
@@ -1652,36 +1088,6 @@ def chat(query: str, llm: AutoLLM):
1652
1088
  query = query.replace("/review", "", 1).strip()
1653
1089
  query = code_review.prompt(query)
1654
1090
 
1655
- conf = memory.get("conf", {})
1656
- # 默认 chat 配置
1657
- yaml_config = {
1658
- "include_file": ["./base/base.yml"],
1659
- "include_project_structure": conf.get("include_project_structure", "true") in ["true", "True"],
1660
- "human_as_model": conf.get("human_as_model", "false") == "true",
1661
- "skip_build_index": conf.get("skip_build_index", "true") == "true",
1662
- "skip_confirm": conf.get("skip_confirm", "true") == "true",
1663
- "silence": conf.get("silence", "true") == "true",
1664
- "query": query
1665
- }
1666
- current_files = memory["current_files"]["files"] # get_llm_friendly_package_docs
1667
- yaml_config["urls"] = current_files
1668
-
1669
- yaml_config["query"] = query
1670
-
1671
- # 如果 conf 中有设置, 则以 conf 配置为主
1672
- for key, value in conf.items():
1673
- converted_value = convert_config_value(key, value)
1674
- if converted_value is not None:
1675
- yaml_config[key] = converted_value
1676
-
1677
- yaml_content = convert_yaml_config_to_str(yaml_config=yaml_config)
1678
- execute_file = os.path.join(args.source_dir, "actions", f"{uuid.uuid4()}.yml")
1679
-
1680
- with open(os.path.join(execute_file), "w") as f: # 保存此次查询的细节
1681
- f.write(yaml_content)
1682
-
1683
- convert_yaml_to_config(execute_file) # 更新到args
1684
-
1685
1091
  memory_dir = os.path.join(args.source_dir, ".auto-coder", "memory")
1686
1092
  os.makedirs(memory_dir, exist_ok=True)
1687
1093
  memory_file = os.path.join(memory_dir, "chat_history.json")
@@ -1745,7 +1151,7 @@ def chat(query: str, llm: AutoLLM):
1745
1151
  pp = SuffixProject(llm=llm, args=args)
1746
1152
  pp.run()
1747
1153
  _sources = pp.sources
1748
- s = build_index_and_filter_files(llm=llm, sources=_sources)
1154
+ s = build_index_and_filter_files(args=args, llm=llm, sources=_sources)
1749
1155
  if s:
1750
1156
  pre_conversations.append(
1751
1157
  {
@@ -1760,7 +1166,7 @@ def chat(query: str, llm: AutoLLM):
1760
1166
 
1761
1167
  loaded_conversations = pre_conversations + chat_history["ask_conversation"]
1762
1168
 
1763
- v = chat_llm.stream_chat_ai(conversations=loaded_conversations, model=memory["conf"]["current_chat_model"])
1169
+ v = chat_llm.stream_chat_ai(conversations=loaded_conversations, model=args.chat_model)
1764
1170
 
1765
1171
  MAX_HISTORY_LINES = 15 # 最大保留历史行数
1766
1172
  lines_buffer = []
@@ -1913,7 +1319,7 @@ def load_include_files(config, base_path, max_depth=10, current_depth=0):
1913
1319
  class CodeAutoGenerateEditBlock:
1914
1320
  def __init__(self, llm: AutoLLM, action=None, fence_0: str = "```", fence_1: str = "```"):
1915
1321
  self.llm = llm
1916
- self.llm.setup_default_model_name(memory["conf"]["current_code_model"])
1322
+ # self.llm.setup_default_model_name(memory["conf"]["current_code_model"])
1917
1323
  self.args = args
1918
1324
  self.action = action
1919
1325
  self.fence_0 = fence_0
@@ -2108,7 +1514,7 @@ class CodeAutoGenerateEditBlock:
2108
1514
  results = []
2109
1515
 
2110
1516
  for llm in self.llms:
2111
- v = llm.chat_ai(conversations=conversations)
1517
+ v = llm.chat_ai(conversations=conversations, model=args.code_model)
2112
1518
  results.append(v.output)
2113
1519
  for result in results:
2114
1520
  conversations_list.append(conversations + [{"role": "assistant", "content": result}])
@@ -2285,7 +1691,7 @@ class CodeAutoGenerateEditBlock:
2285
1691
  conversations = [{"role": "user", "content": init_prompt}]
2286
1692
 
2287
1693
  code_llm = self.llms[0]
2288
- v = code_llm.chat_ai(conversations=conversations)
1694
+ v = code_llm.chat_ai(conversations=conversations, model=args.code_model)
2289
1695
  results.append(v.output)
2290
1696
 
2291
1697
  conversations.append({"role": "assistant", "content": v.output})
@@ -2301,7 +1707,7 @@ class CodeAutoGenerateEditBlock:
2301
1707
  with open(self.args.target_file, "w") as file:
2302
1708
  file.write("继续")
2303
1709
 
2304
- t = code_llm.chat_ai(conversations=conversations)
1710
+ t = code_llm.chat_ai(conversations=conversations, model=args.code_model)
2305
1711
 
2306
1712
  results.append(t.output)
2307
1713
  conversations.append({"role": "assistant", "content": t.output})
@@ -2316,7 +1722,7 @@ class CodeAutoGenerateEditBlock:
2316
1722
  class CodeModificationRanker:
2317
1723
  def __init__(self, llm: AutoLLM):
2318
1724
  self.llm = llm
2319
- self.llm.setup_default_model_name(memory["conf"]["current_code_model"])
1725
+ self.llm.setup_default_model_name(args.code_model)
2320
1726
  self.args = args
2321
1727
  self.llms = [self.llm]
2322
1728
 
@@ -2436,7 +1842,7 @@ class TextSimilarity:
2436
1842
  class CodeAutoMergeEditBlock:
2437
1843
  def __init__(self, llm: AutoLLM, fence_0: str = "```", fence_1: str = "```"):
2438
1844
  self.llm = llm
2439
- self.llm.setup_default_model_name(memory["conf"]["current_code_model"])
1845
+ self.llm.setup_default_model_name(args.code_model)
2440
1846
  self.args = args
2441
1847
  self.fence_0 = fence_0
2442
1848
  self.fence_1 = fence_1
@@ -2825,7 +2231,7 @@ class ActionPyProject(BaseAction):
2825
2231
  pp.run()
2826
2232
  source_code = pp.output()
2827
2233
  if self.llm:
2828
- source_code = build_index_and_filter_files(llm=self.llm, sources=pp.sources)
2234
+ source_code = build_index_and_filter_files(args=args, llm=self.llm, sources=pp.sources)
2829
2235
  self.process_content(source_code)
2830
2236
  return True
2831
2237
 
@@ -2884,7 +2290,7 @@ class ActionSuffixProject(BaseAction):
2884
2290
  pp.run()
2885
2291
  source_code = pp.output()
2886
2292
  if self.llm:
2887
- source_code = build_index_and_filter_files(llm=self.llm, sources=pp.sources)
2293
+ source_code = build_index_and_filter_files(args=args, llm=self.llm, sources=pp.sources)
2888
2294
  self.process_content(source_code)
2889
2295
 
2890
2296
  def process_content(self, content: str):
@@ -3003,12 +2409,11 @@ def coding(query: str, llm: AutoLLM):
3003
2409
  if latest_yaml_file:
3004
2410
  yaml_config = {
3005
2411
  "include_file": ["./base/base.yml"],
3006
- "auto_merge": conf.get("auto_merge", "editblock"),
3007
- "human_as_model": conf.get("human_as_model", "false") == "true",
3008
2412
  "skip_build_index": conf.get("skip_build_index", "true") == "true",
3009
2413
  "skip_confirm": conf.get("skip_confirm", "true") == "true",
3010
- "silence": conf.get("silence", "true") == "true",
3011
- "include_project_structure": conf.get("include_project_structure", "true") == "true",
2414
+ "chat_model": conf.get("chat_model", ""),
2415
+ "code_model": conf.get("code_model", ""),
2416
+ "auto_merge": conf.get("auto_merge", "editblock"),
3012
2417
  "context": ""
3013
2418
  }
3014
2419
 
@@ -3133,28 +2538,26 @@ def commit_info(query: str, llm: AutoLLM):
3133
2538
  prepare_chat_yaml() # 复制上一个序号的 yaml 文件, 生成一个新的聊天 yaml 文件
3134
2539
 
3135
2540
  latest_yaml_file = get_last_yaml_file(os.path.join(args.source_dir, "actions"))
3136
-
3137
- conf = memory.get("conf", {})
3138
- current_files = memory["current_files"]["files"]
3139
2541
  execute_file = None
3140
2542
 
3141
2543
  if latest_yaml_file:
3142
2544
  try:
3143
2545
  execute_file = os.path.join(args.source_dir, "actions", latest_yaml_file)
2546
+ conf = memory.get("conf", {})
3144
2547
  yaml_config = {
3145
2548
  "include_file": ["./base/base.yml"],
3146
- "auto_merge": conf.get("auto_merge", "editblock"),
3147
- "human_as_model": conf.get("human_as_model", "false") == "true",
3148
2549
  "skip_build_index": conf.get("skip_build_index", "true") == "true",
3149
2550
  "skip_confirm": conf.get("skip_confirm", "true") == "true",
3150
- "silence": conf.get("silence", "true") == "true",
3151
- "include_project_structure": conf.get("include_project_structure", "true") == "true",
2551
+ "chat_model": conf.get("chat_model", ""),
2552
+ "code_model": conf.get("code_model", ""),
2553
+ "auto_merge": conf.get("auto_merge", "editblock")
3152
2554
  }
3153
2555
  for key, value in conf.items():
3154
2556
  converted_value = convert_config_value(key, value)
3155
2557
  if converted_value is not None:
3156
2558
  yaml_config[key] = converted_value
3157
2559
 
2560
+ current_files = memory["current_files"]["files"]
3158
2561
  yaml_config["urls"] = current_files
3159
2562
 
3160
2563
  # 临时保存yaml文件,然后读取yaml文件,更新args
@@ -3169,7 +2572,7 @@ def commit_info(query: str, llm: AutoLLM):
3169
2572
 
3170
2573
  # commit_message = ""
3171
2574
  commit_llm = llm
3172
- commit_llm.setup_default_model_name(memory["conf"]["current_chat_model"])
2575
+ commit_llm.setup_default_model_name(args.chat_model)
3173
2576
  console.print(f"Commit 信息生成中...", style="yellow")
3174
2577
 
3175
2578
  try:
@@ -3239,20 +2642,7 @@ def _generate_shell_script(user_input: str) -> str:
3239
2642
 
3240
2643
 
3241
2644
  def generate_shell_command(input_text: str, llm: AutoLLM) -> str | None:
3242
- conf = memory.get("conf", {})
3243
- yaml_config = {
3244
- "include_file": ["./base/base.yml"],
3245
- }
3246
- if "model" in conf:
3247
- yaml_config["model"] = conf["model"]
3248
- yaml_config["query"] = input_text
3249
-
3250
- yaml_content = convert_yaml_config_to_str(yaml_config=yaml_config)
3251
-
3252
- execute_file = os.path.join(args.source_dir, "actions", f"{uuid.uuid4()}.yml")
3253
-
3254
- with open(os.path.join(execute_file), "w") as f:
3255
- f.write(yaml_content)
2645
+ update_config_to_args(query=input_text, delete_execute_file=True)
3256
2646
 
3257
2647
  try:
3258
2648
  console.print(
@@ -3262,7 +2652,7 @@ def generate_shell_command(input_text: str, llm: AutoLLM) -> str | None:
3262
2652
  border_style="green",
3263
2653
  )
3264
2654
  )
3265
- llm.setup_default_model_name(memory["conf"]["current_code_model"])
2655
+ llm.setup_default_model_name(args.code_model)
3266
2656
  result = _generate_shell_script.with_llm(llm).run(user_input=input_text)
3267
2657
  shell_script = extract_code(result.output)[0][1]
3268
2658
  console.print(
@@ -3274,7 +2664,8 @@ def generate_shell_command(input_text: str, llm: AutoLLM) -> str | None:
3274
2664
  )
3275
2665
  return shell_script
3276
2666
  finally:
3277
- os.remove(execute_file)
2667
+ pass
2668
+ # os.remove(execute_file)
3278
2669
 
3279
2670
 
3280
2671
  def execute_shell_command(command: str):
@@ -3884,10 +3275,10 @@ def manage_models(models_args, models_data, llm: AutoLLM):
3884
3275
  logger.info(f"正在卸载 {remove_model_name} 模型")
3885
3276
  if llm.get_sub_client(remove_model_name):
3886
3277
  llm.remove_sub_client(remove_model_name)
3887
- if remove_model_name == memory["conf"]["current_chat_model"]:
3888
- logger.warning(f"当前首选 Chat 模型 {remove_model_name} 已被删除, 请立即 /conf current_chat_model: 调整 !!!")
3889
- if remove_model_name == memory["conf"]["current_code_model"]:
3890
- logger.warning(f"当前首选 Code 模型 {remove_model_name} 已被删除, 请立即 /conf current_code_model: 调整 !!!")
3278
+ if remove_model_name == memory["conf"]["chat_model"]:
3279
+ logger.warning(f"当前首选 Chat 模型 {remove_model_name} 已被删除, 请立即 /conf chat_model: 调整 !!!")
3280
+ if remove_model_name == memory["conf"]["code_model"]:
3281
+ logger.warning(f"当前首选 Code 模型 {remove_model_name} 已被删除, 请立即 /conf code_model: 调整 !!!")
3891
3282
 
3892
3283
 
3893
3284
  def configure_project_model():
@@ -3958,58 +3349,71 @@ def configure_project_model():
3958
3349
  )
3959
3350
 
3960
3351
 
3961
- def new_project(query, llm):
3962
- console.print(f"正在基于你的需求 {query} 构建项目 ...", style="bold green")
3963
- env_info = detect_env()
3964
- project = BuildNewProject(args=args, llm=llm,
3965
- chat_model=memory["conf"]["current_chat_model"],
3966
- code_model=memory["conf"]["current_code_model"])
3967
-
3968
- console.print(f"正在完善项目需求 ...", style="bold green")
3969
-
3970
- information = project.build_project_information(query, env_info, args.project_type)
3971
- if not information:
3972
- raise Exception(f"项目需求未正常生成 .")
3973
-
3974
- table = Table(title=f"{query}")
3975
- table.add_column("需求说明", style="cyan")
3976
- table.add_row(f"{information[:50]}...")
3977
- console.print(table)
3978
-
3979
- console.print(f"正在完善项目架构 ...", style="bold green")
3980
- architecture = project.build_project_architecture(query, env_info, args.project_type, information)
3981
-
3982
- console.print(f"正在构建项目索引 ...", style="bold green")
3983
- index_file_list = project.build_project_index(query, env_info, args.project_type, information, architecture)
3984
-
3985
- table = Table(title=f"索引列表")
3986
- table.add_column("路径", style="cyan")
3987
- table.add_column("用途", style="cyan")
3988
- for index_file in index_file_list.file_list:
3989
- table.add_row(index_file.file_path, index_file.purpose)
3990
- console.print(table)
3991
-
3992
- for index_file in index_file_list.file_list:
3993
- full_path = os.path.join(args.source_dir, index_file.file_path)
3994
-
3995
- # 获取目录路径
3996
- full_dir_path = os.path.dirname(full_path)
3997
- if not os.path.exists(full_dir_path):
3998
- os.makedirs(full_dir_path)
3999
-
4000
- console.print(f"正在编码: {full_path} ...", style="bold green")
4001
- code = project.build_single_code(query, env_info, args.project_type, information, architecture, index_file)
4002
-
4003
- with open(full_path, "w") as fp:
4004
- fp.write(code)
4005
-
4006
- # 生成 readme
4007
- readme_context = information + architecture
4008
- readme_path = os.path.join(args.source_dir, "README.md")
4009
- with open(readme_path, "w") as fp:
4010
- fp.write(readme_context)
4011
-
4012
- console.print(f"项目构建完成", style="bold green")
3352
+ # def new_project(query, llm):
3353
+ # console.print(f"正在基于你的需求 {query} 构建项目 ...", style="bold green")
3354
+ # env_info = detect_env()
3355
+ # project = BuildNewProject(args=args, llm=llm,
3356
+ # chat_model=memory["conf"]["chat_model"],
3357
+ # code_model=memory["conf"]["code_model"])
3358
+ #
3359
+ # console.print(f"正在完善项目需求 ...", style="bold green")
3360
+ #
3361
+ # information = project.build_project_information(query, env_info, args.project_type)
3362
+ # if not information:
3363
+ # raise Exception(f"项目需求未正常生成 .")
3364
+ #
3365
+ # table = Table(title=f"{query}")
3366
+ # table.add_column("需求说明", style="cyan")
3367
+ # table.add_row(f"{information[:50]}...")
3368
+ # console.print(table)
3369
+ #
3370
+ # console.print(f"正在完善项目架构 ...", style="bold green")
3371
+ # architecture = project.build_project_architecture(query, env_info, args.project_type, information)
3372
+ #
3373
+ # console.print(f"正在构建项目索引 ...", style="bold green")
3374
+ # index_file_list = project.build_project_index(query, env_info, args.project_type, information, architecture)
3375
+ #
3376
+ # table = Table(title=f"索引列表")
3377
+ # table.add_column("路径", style="cyan")
3378
+ # table.add_column("用途", style="cyan")
3379
+ # for index_file in index_file_list.file_list:
3380
+ # table.add_row(index_file.file_path, index_file.purpose)
3381
+ # console.print(table)
3382
+ #
3383
+ # for index_file in index_file_list.file_list:
3384
+ # full_path = os.path.join(args.source_dir, index_file.file_path)
3385
+ #
3386
+ # # 获取目录路径
3387
+ # full_dir_path = os.path.dirname(full_path)
3388
+ # if not os.path.exists(full_dir_path):
3389
+ # os.makedirs(full_dir_path)
3390
+ #
3391
+ # console.print(f"正在编码: {full_path} ...", style="bold green")
3392
+ # code = project.build_single_code(query, env_info, args.project_type, information, architecture, index_file)
3393
+ #
3394
+ # with open(full_path, "w") as fp:
3395
+ # fp.write(code)
3396
+ #
3397
+ # # 生成 readme
3398
+ # readme_context = information + architecture
3399
+ # readme_path = os.path.join(args.source_dir, "README.md")
3400
+ # with open(readme_path, "w") as fp:
3401
+ # fp.write(readme_context)
3402
+ #
3403
+ # console.print(f"项目构建完成", style="bold green")
3404
+
3405
+
3406
+ def is_old_version():
3407
+ """
3408
+ __version__ = "0.1.26" 开始使用兼容 AutoCoder 的 chat_model, code_model 参数
3409
  不再使用 current_chat_model 和 current_code_model
3410
+ """
3411
+ if 'current_chat_model' in memory['conf'] and 'current_code_model' in memory['conf']:
3412
  logger.warning(f"您当前使用的版本偏低, 正在进行配置兼容性处理")
3413
+ memory['conf']['chat_model'] = memory['conf']['current_chat_model']
3414
+ memory['conf']['code_model'] = memory['conf']['current_code_model']
3415
+ del memory['conf']['current_chat_model']
3416
+ del memory['conf']['current_code_model']
4013
3417
 
4014
3418
 
4015
3419
  def main():
@@ -4021,14 +3425,15 @@ def main():
4021
3425
  initialize_system()
4022
3426
 
4023
3427
  load_memory()
3428
+ is_old_version()
4024
3429
 
4025
3430
  if len(memory["models"]) == 0:
4026
3431
  _model_pass = input(f" 是否跳过模型配置(y/n): ").strip().lower()
4027
3432
  if _model_pass == "n":
4028
3433
  m1, m2, m3, m4 = configure_project_model()
4029
3434
  print_status(f"正在更新缓存...", "warning")
4030
- memory["conf"]["current_chat_model"] = m1
4031
- memory["conf"]["current_code_model"] = m1
3435
+ memory["conf"]["chat_model"] = m1
3436
+ memory["conf"]["code_model"] = m1
4032
3437
  memory["models"][m1] = {"base_url": m3, "api_key": m4, "model": m2}
4033
3438
  print_status(f"供应商配置已成功完成!后续你可以使用 /models 命令, 查看, 新增和修改所有模型", "success")
4034
3439
  else:
@@ -4046,10 +3451,10 @@ def main():
4046
3451
 
4047
3452
  print_status("初始化完成。", "success")
4048
3453
 
4049
- if memory["conf"]["current_chat_model"] not in memory["models"].keys():
4050
- print_status("首选 Chat 模型与部署模型不一致, 请使用 /conf current_chat_model:xxx 设置", "error")
4051
- if memory["conf"]["current_code_model"] not in memory["models"].keys():
4052
- print_status("首选 Code 模型与部署模型不一致, 请使用 /conf current_code_model:xxx 设置", "error")
3454
+ if memory["conf"]["chat_model"] not in memory["models"].keys():
3455
+ print_status("首选 Chat 模型与部署模型不一致, 请使用 /conf chat_model:xxx 设置", "error")
3456
+ if memory["conf"]["code_model"] not in memory["models"].keys():
3457
+ print_status("首选 Code 模型与部署模型不一致, 请使用 /conf code_model:xxx 设置", "error")
4053
3458
 
4054
3459
  MODES = {
4055
3460
  "normal": "正常模式",
@@ -4180,12 +3585,12 @@ def main():
4180
3585
  print("\033[91mPlease enter your request.\033[0m")
4181
3586
  continue
4182
3587
  coding(query=query, llm=auto_llm)
4183
- elif user_input.startswith("/new"):
4184
- query = user_input[len("/new"):].strip()
4185
- if not query:
4186
- print("\033[91mPlease enter your request.\033[0m")
4187
- continue
4188
- new_project(query=query, llm=auto_llm)
3588
+ # elif user_input.startswith("/new"):
3589
+ # query = user_input[len("/new"):].strip()
3590
+ # if not query:
3591
+ # print("\033[91mPlease enter your request.\033[0m")
3592
+ # continue
3593
+ # new_project(query=query, llm=auto_llm)
4189
3594
  elif user_input.startswith("/chat"):
4190
3595
  query = user_input[len("/chat"):].strip()
4191
3596
  if not query:
@@ -4223,9 +3628,9 @@ def main():
4223
3628
  break
4224
3629
  except Exception as e:
4225
3630
  print(f"\033[91m发生异常:\033[0m \033[93m{type(e).__name__}\033[0m - {str(e)}")
4226
- if runing_args and runing_args.debug:
4227
- import traceback
4228
- traceback.print_exc()
3631
+ # if runing_args and runing_args.debug:
3632
+ import traceback
3633
+ traceback.print_exc()
4229
3634
 
4230
3635
 
4231
3636
  if __name__ == '__main__':