auto-coder 0.1.260__py3-none-any.whl → 0.1.262__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of auto-coder might be problematic. Click here for more details.

@@ -3,6 +3,16 @@ from byzerllm.utils import format_str_jinja2
3
3
 
4
4
  MESSAGES = {
5
5
  "en": {
6
+ "config_validation_error": "Config validation error: {{error}}",
7
+ "invalid_boolean_value": "Value '{{value}}' is not a valid boolean(true/false)",
8
+ "invalid_integer_value": "Value '{{value}}' is not a valid integer",
9
+ "invalid_float_value": "Value '{{value}}' is not a valid float",
10
+ "invalid_type_value": "Value '{{value}}' is not a valid type (expected: {{types}})",
11
+ "value_out_of_range": "Value {{value}} is out of allowed range({{min}}~{{max}})",
12
+ "invalid_choice": "Value '{{value}}' is not in allowed options({{allowed}})",
13
+ "unknown_config_key": "Unknown config key '{{key}}'",
14
+ "model_not_found": "Model '{{model}}' is not configured in models.yml",
15
+ "required_without_default": "Config key '{{key}}' requires explicit value",
6
16
  "auto_command_action_break": "Command {{command}} execution failed (got {{action}} result), no result can be obtained, please try again",
7
17
  "auto_command_break": "Auto command execution failed to execute command: {{command}}",
8
18
  "auto_command_executing": "\n\n============= Executing command: {{command}} =============\n\n",
@@ -123,9 +133,22 @@ MESSAGES = {
123
133
  "auto_command_failed": "Auto command failed: {{error}}. Please check your input and try again.",
124
134
  "command_execution_result": "{{action}} execution result",
125
135
  "satisfied_prompt": "Requirements satisfied, no further action needed",
126
- "auto_command_analyzed": "Selected command"
127
- },
136
+ "auto_command_analyzed": "Selected command",
137
+ "invalid_enum_value": "Value '{{value}}' is not in allowed values ({{allowed}})",
138
+ "no_changes_made": "⚠️ no changes made, the reason may be that the text block generated by the coding function has a problem, so it cannot be merged into the project",
139
+ "conversation_pruning_start": "Conversation pruning started, total tokens: {{total_tokens}}, safe zone: {{safe_zone}}",
140
+ },
128
141
  "zh": {
142
+ "config_validation_error": "配置验证错误: {{error}}",
143
+ "invalid_boolean_value": "值 '{{value}}' 不是有效的布尔值(true/false)",
144
+ "invalid_integer_value": "值 '{{value}}' 不是有效的整数",
145
+ "invalid_float_value": "值 '{{value}}' 不是有效的浮点数",
146
+ "invalid_type_value": "值 '{{value}}' 不是有效的类型 (期望: {{types}})",
147
+ "value_out_of_range": "值 {{value}} 超出允许范围({{min}}~{{max}})",
148
+ "invalid_choice": "值 '{{value}}' 不在允许选项中({{allowed}})",
149
+ "unknown_config_key": "未知的配置项 '{{key}}'",
150
+ "model_not_found": "模型 '{{model}}' 未在 models.yml 中配置",
151
+ "required_without_default": "配置项 '{{key}}' 需要明确设置值",
129
152
  "auto_command_action_break": "命令 {{command}} 执行失败(获取到了 {{action}} 的结果),无法获得任何结果,请重试",
130
153
  "auto_command_break": "自动命令执行失败: {{command}}",
131
154
  "auto_command_executing": "\n\n============= 正在执行指令: {{command}} =============\n\n",
@@ -213,7 +236,7 @@ MESSAGES = {
213
236
  "unmerged_blocks_warning": "⚠️ 发现 {{ num_blocks }} 个未合并的代码块,更改将不会被应用。请手动检查后重试。",
214
237
  "pylint_file_check_failed": "⚠️ {{ file_path }} 的 Pylint 检查失败。更改未应用。错误: {{ error_message }}",
215
238
  "merge_success": "✅ 成功合并了 {{ num_files }} 个文件中的更改 {{ num_changes }}/{{ total_blocks }} 个代码块。",
216
- "no_changes_made": "⚠️ 未对任何文件进行更改。",
239
+ "no_changes_made": "⚠️ 未对任何文件进行更改。这个原因可能是因为coding函数生成的文本块格式有问题,导致无法合并进项目",
217
240
  "unmerged_blocks_title": "未合并代码块",
218
241
  "unmerged_file_path": "文件: {{file_path}}",
219
242
  "unmerged_search_block": "Search Block({{similarity}}):",
@@ -245,7 +268,9 @@ MESSAGES = {
245
268
  "auto_command_failed": "自动命令执行失败: {{error}}。请检查您的输入并重试。",
246
269
  "command_execution_result": "{{action}} 执行结果",
247
270
  "satisfied_prompt": "已满足需求,无需进一步操作",
248
- "auto_command_analyzed": "被选择指令"
271
+ "auto_command_analyzed": "被选择指令",
272
+ "invalid_enum_value": "值 '{{value}}' 不在允许的值列表中 ({{allowed}})",
273
+ "conversation_pruning_start": "⚠️ 对话长度 {{total_tokens}} tokens 超过安全阈值 {{safe_zone}},开始修剪对话。"
249
274
  }}
250
275
 
251
276
 
@@ -157,7 +157,7 @@ class CodeAutoMerge:
157
157
  # get the file name
158
158
  file_name = os.path.basename(self.args.file)
159
159
 
160
- if not force_skip_git:
160
+ if not force_skip_git and not self.args.skip_commit:
161
161
  try:
162
162
  git_utils.commit_changes(self.args.source_dir, f"auto_coder_pre_{file_name}_{md5}")
163
163
  except Exception as e:
@@ -176,6 +176,6 @@ class CodeAutoMerge:
176
176
  f.write(block.content)
177
177
 
178
178
  self.printer.print_in_terminal("files_merged", total=total)
179
- if not force_skip_git:
179
+ if not force_skip_git and not self.args.skip_commit:
180
180
  commit_result = git_utils.commit_changes(self.args.source_dir, f"auto_coder_{file_name}_{md5}\n{self.args.query}")
181
181
  git_utils.print_commit_info(commit_result=commit_result)
@@ -511,7 +511,7 @@ class CodeAutoMergeDiff:
511
511
  # get the file name
512
512
  file_name = os.path.basename(self.args.file)
513
513
 
514
- if not force_skip_git:
514
+ if not force_skip_git and not self.args.skip_commit:
515
515
  try:
516
516
  git_utils.commit_changes(self.args.source_dir, f"auto_coder_pre_{file_name}_{md5}")
517
517
  except Exception as e:
@@ -522,6 +522,6 @@ class CodeAutoMergeDiff:
522
522
  self.apply_edits(edits)
523
523
 
524
524
  self.printer.print_in_terminal("files_merged_total", total=total)
525
- if not force_skip_git:
525
+ if not force_skip_git and not self.args.skip_commit:
526
526
  commit_result = git_utils.commit_changes(self.args.source_dir, f"auto_coder_{file_name}_{md5}\n{self.args.query}")
527
527
  git_utils.print_commit_info(commit_result=commit_result)
@@ -360,7 +360,7 @@ class CodeAutoMergeEditBlock:
360
360
  file_path=file_path,
361
361
  error_message=error_message)
362
362
 
363
- if changes_made and not force_skip_git:
363
+ if changes_made and not force_skip_git and not self.args.skip_commit:
364
364
  try:
365
365
  git_utils.commit_changes(
366
366
  self.args.source_dir, f"auto_coder_pre_{file_name}_{md5}"
@@ -400,7 +400,7 @@ class CodeAutoMergeEditBlock:
400
400
  )
401
401
 
402
402
  if changes_made:
403
- if not force_skip_git:
403
+ if not force_skip_git and not self.args.skip_commit:
404
404
  try:
405
405
  commit_result = git_utils.commit_changes(
406
406
  self.args.source_dir, f"auto_coder_{file_name}_{md5}\n{self.args.query}"
@@ -209,7 +209,7 @@ class CodeAutoMergeStrictDiff:
209
209
  # get the file name
210
210
  file_name = os.path.basename(self.args.file)
211
211
 
212
- if not force_skip_git:
212
+ if not force_skip_git and not self.args.skip_commit:
213
213
  try:
214
214
  git_utils.commit_changes(self.args.source_dir, f"auto_coder_pre_{file_name}_{md5}")
215
215
  except Exception as e:
@@ -232,7 +232,7 @@ class CodeAutoMergeStrictDiff:
232
232
  raise Exception("Error applying diff to file: " + path)
233
233
 
234
234
  self.printer.print_in_terminal("files_merged_total", total=total)
235
- if not force_skip_git:
235
+ if not force_skip_git and not self.args.skip_commit:
236
236
  commit_result = git_utils.commit_changes(self.args.source_dir, f"auto_coder_{file_name}_{md5}\n{self.args.query}")
237
237
  git_utils.print_commit_info(commit_result=commit_result)
238
238
 
@@ -484,16 +484,7 @@ class CommandCompleter(Completer):
484
484
  current_word = parser.current_word()
485
485
  for command in parser.get_sub_commands():
486
486
  if command.startswith(current_word):
487
- yield Completion(command, start_position=-len(current_word))
488
-
489
- elif words[0] == "/coding":
490
- new_text = text[len("/coding"):]
491
- parser = CommandTextParser(new_text, words[0])
492
- parser.lib()
493
- current_word = parser.current_word()
494
- for command in parser.get_sub_commands():
495
- if command.startswith(current_word):
496
- yield Completion(command, start_position=-len(current_word))
487
+ yield Completion(command, start_position=-len(current_word))
497
488
 
498
489
  elif words[0] == "/conf":
499
490
  new_words = text[len("/conf"):].strip().split()
@@ -0,0 +1,245 @@
1
+ from typing import Any
2
+ from autocoder.utils import llms as llms_utils
3
+ from autocoder.common.auto_coder_lang import get_message_with_format
4
+
5
+ class ConfigValidationError(Exception):
6
+ def __init__(self, message: str):
7
+ self.message = message
8
+ super().__init__(message)
9
+
10
+ class ConfigValidator:
11
+ CONFIG_SPEC = {
12
+ # 核心配置项
13
+ "auto_merge": {
14
+ "type": str,
15
+ "allowed": ["editblock", "diff", "wholefile"],
16
+ "default": "editblock",
17
+ "description": "代码合并方式(editblock/diff/wholefile)"
18
+ },
19
+ "editblock_similarity": {
20
+ "type": float,
21
+ "min": 0.0,
22
+ "max": 1.0,
23
+ "default": 0.9,
24
+ "description": "代码块相似度阈值(0-1)"
25
+ },
26
+ "generate_times_same_model": {
27
+ "type": int,
28
+ "min": 1,
29
+ "max": 5,
30
+ "default": 1,
31
+ "description": "同模型生成次数(1-5)"
32
+ },
33
+ "skip_filter_index": {
34
+ "type": bool,
35
+ "default": False,
36
+ "description": "是否跳过根据用户的query自动查找上下文"
37
+ },
38
+ "skip_build_index": {
39
+ "type": bool,
40
+ "default": True,
41
+ "description": "是否自动构建索引"
42
+ },
43
+ "enable_global_memory": {
44
+ "type": bool,
45
+ "default": True,
46
+ "description": "是否开启全局记忆"
47
+ },
48
+ "rank_times_same_model": {
49
+ "type": int,
50
+ "min": 1,
51
+ "max": 3,
52
+ "default": 1,
53
+ "description": "相同模型重排序次数"
54
+ },
55
+ "human_as_model": {
56
+ "type": bool,
57
+ "default": False,
58
+ "description": "是否以人类作为模型"
59
+ },
60
+ "skip_confirm": {
61
+ "type": bool,
62
+ "default": True,
63
+ "description": "是否跳过确认步骤"
64
+ },
65
+ "silence": {
66
+ "type": bool,
67
+ "default": True,
68
+ "description": "是否静默模式"
69
+ },
70
+ "include_project_structure": {
71
+ "type": bool,
72
+ "default": True,
73
+ "description": "是否包含项目结构"
74
+ },
75
+ "product_mode": {
76
+ "type": str,
77
+ "allowed": ["lite", "pro"],
78
+ "default": "lite",
79
+ "description": "产品模式(lite/pro)"
80
+ },
81
+ "model": {
82
+ "type": str,
83
+ "default": "v3_chat",
84
+ "description": "默认模型名称"
85
+ },
86
+ "chat_model": {
87
+ "type": str,
88
+ "default": "r1_chat",
89
+ "description": "聊天模型名称"
90
+ },
91
+ "code_model": {
92
+ "type": str,
93
+ "default": "v3_chat",
94
+ "description": "代码生成模型名称"
95
+ },
96
+ "index_filter_model": {
97
+ "type": str,
98
+ "default": "r1_chat",
99
+ "description": "索引过滤模型名称"
100
+ },
101
+ "generate_rerank_model": {
102
+ "type": str,
103
+ "default": "r1_chat",
104
+ "description": "生成重排序模型名称"
105
+ },
106
+ "emb_model": {
107
+ "type": str,
108
+ "default": "v3_chat",
109
+ "description": "嵌入模型名称"
110
+ },
111
+ "vl_model": {
112
+ "type": str,
113
+ "default": "v3_chat",
114
+ "description": "视觉语言模型名称"
115
+ },
116
+ "designer_model": {
117
+ "type": str,
118
+ "default": "v3_chat",
119
+ "description": "设计模型名称"
120
+ },
121
+ "sd_model": {
122
+ "type": str,
123
+ "default": "v3_chat",
124
+ "description": "稳定扩散模型名称"
125
+ },
126
+ "voice2text_model": {
127
+ "type": str,
128
+ "default": "v3_chat",
129
+ "description": "语音转文本模型名称"
130
+ },
131
+ "commit_model": {
132
+ "type": str,
133
+ "default": "v3_chat",
134
+ "description": "提交信息生成模型名称"
135
+ }
136
+ }
137
+
138
+ @classmethod
139
+ def validate(cls, key: str, value: Any, product_mode: str) -> Any:
140
+ # 获取配置规范
141
+ spec = cls.CONFIG_SPEC.get(key)
142
+ if not spec:
143
+ # raise ConfigValidationError(
144
+ # get_message_with_format("unknown_config_key", key=key)
145
+ # )
146
+ return
147
+
148
+ # 类型转换和验证
149
+ try:
150
+ # 布尔类型特殊处理
151
+ if isinstance(spec['type'], (list, tuple)):
152
+ # 多个类型支持
153
+ for type_ in spec['type']:
154
+ try:
155
+ if type_ == bool:
156
+ return cls.validate_boolean(value)
157
+ converted_value = type_(value)
158
+ break
159
+ except ValueError:
160
+ continue
161
+ else:
162
+ types_str = ', '.join([t.__name__ for t in spec['type']])
163
+ raise ConfigValidationError(
164
+ get_message_with_format(f"invalid_type_value",
165
+ value=value,
166
+ types=types_str)
167
+ )
168
+ else:
169
+ # 单个类型处理
170
+ if spec['type'] == bool:
171
+ return cls.validate_boolean(value)
172
+ converted_value = spec['type'](value)
173
+ except ValueError:
174
+ type_name = spec['type'].__name__ if not isinstance(spec['type'], (list, tuple)) else ', '.join([t.__name__ for t in spec['type']])
175
+ raise ConfigValidationError(
176
+ get_message_with_format(f"invalid_type_value",
177
+ value=value,
178
+ types=type_name)
179
+ )
180
+
181
+ # 范围检查
182
+ if 'min' in spec and converted_value < spec['min']:
183
+ raise ConfigValidationError(
184
+ get_message_with_format("value_out_of_range",
185
+ value=converted_value,
186
+ min=spec['min'],
187
+ max=spec['max'])
188
+ )
189
+
190
+ if 'max' in spec and converted_value > spec['max']:
191
+ raise ConfigValidationError(
192
+ get_message_with_format("value_out_of_range",
193
+ value=converted_value,
194
+ min=spec['min'],
195
+ max=spec['max'])
196
+ )
197
+
198
+ # 枚举值检查
199
+ if 'allowed' in spec and converted_value not in spec['allowed']:
200
+ raise ConfigValidationError(
201
+ get_message_with_format("invalid_enum_value",
202
+ value=converted_value,
203
+ allowed=', '.join(map(str, spec['allowed'])))
204
+ )
205
+
206
+ # 模型存在性检查
207
+ if product_mode == "lite" and key in ["chat_model","code_model",
208
+ "index_filter_model", "generate_rerank_model",
209
+ "rank_times_same_model",
210
+ "emb_model", "vl_model", "designer_model", "sd_model",
211
+ "voice2text_model",
212
+ "commit_model","model"]:
213
+ if not llms_utils.get_model_info(converted_value,product_mode):
214
+ raise ConfigValidationError(
215
+ get_message_with_format("model_not_found", model=converted_value)
216
+ )
217
+
218
+ return converted_value
219
+
220
+ @staticmethod
221
+ def validate_boolean(value: str) -> bool:
222
+ if value.lower() in ("true", "1", "yes"):
223
+ return True
224
+ if value.lower() in ("false", "0", "no"):
225
+ return False
226
+ raise ConfigValidationError(
227
+ get_message_with_format("invalid_boolean_value", value=value)
228
+ )
229
+
230
+ @classmethod
231
+ def get_config_docs(cls) -> str:
232
+ """生成配置项文档"""
233
+ docs = ["可用配置项:"]
234
+ for key, spec in cls.CONFIG_SPEC.items():
235
+ desc = [
236
+ f"- {key}: {spec['description']}",
237
+ f" 类型: {spec['type'].__name__}",
238
+ f" 默认值: {spec['default']}"
239
+ ]
240
+ if "allowed" in spec:
241
+ desc.append(f" 允许值: {', '.join(spec['allowed'])}")
242
+ if "min" in spec and "max" in spec:
243
+ desc.append(f" 取值范围: {spec['min']}~{spec['max']}")
244
+ docs.append("\n".join(desc))
245
+ return "\n\n".join(docs)
@@ -0,0 +1,131 @@
1
+ from typing import List, Dict, Any, Union
2
+ import json
3
+ from pydantic import BaseModel
4
+ import byzerllm
5
+ from autocoder.common.printer import Printer
6
+ from autocoder.utils.llms import count_tokens
7
+ from loguru import logger
8
+
9
+ class PruneStrategy(BaseModel):
10
+ name: str
11
+ description: str
12
+ config: Dict[str, Any] = {"safe_zone_tokens": 0, "group_size": 4}
13
+
14
+ class ConversationPruner:
15
+ def __init__(self, llm: Union[byzerllm.ByzerLLM, byzerllm.SimpleByzerLLM],
16
+ safe_zone_tokens: int = 500, group_size: int = 4):
17
+ self.llm = llm
18
+ self.printer = Printer()
19
+ self.strategies = {
20
+ "summarize": PruneStrategy(
21
+ name="summarize",
22
+ description="对早期对话进行分组摘要,保留关键信息",
23
+ config={"safe_zone_tokens": safe_zone_tokens, "group_size": group_size}
24
+ ),
25
+ "truncate": PruneStrategy(
26
+ name="truncate",
27
+ description="分组截断最早的部分对话",
28
+ config={"safe_zone_tokens": safe_zone_tokens, "group_size": group_size}
29
+ ),
30
+ "hybrid": PruneStrategy(
31
+ name="hybrid",
32
+ description="先尝试分组摘要,如果仍超限则分组截断",
33
+ config={"safe_zone_tokens": safe_zone_tokens, "group_size": group_size}
34
+ )
35
+ }
36
+
37
+ def get_available_strategies(self) -> List[Dict[str, Any]]:
38
+ """获取所有可用策略"""
39
+ return [strategy.dict() for strategy in self.strategies.values()]
40
+
41
+ def prune_conversations(self, conversations: List[Dict[str, Any]],
42
+ strategy_name: str = "summarize") -> List[Dict[str, Any]]:
43
+ """
44
+ 根据策略修剪对话
45
+ Args:
46
+ conversations: 原始对话列表
47
+ strategy_name: 策略名称
48
+ Returns:
49
+ 修剪后的对话列表
50
+ """
51
+ current_tokens = count_tokens(json.dumps(conversations, ensure_ascii=False))
52
+ if current_tokens <= self.strategies.get(strategy_name, self.strategies["summarize"]).config["safe_zone_tokens"]:
53
+ return conversations
54
+
55
+ strategy = self.strategies.get(strategy_name, self.strategies["summarize"])
56
+
57
+ if strategy.name == "summarize":
58
+ return self._summarize_prune(conversations, strategy.config)
59
+ elif strategy.name == "truncate":
60
+ return self._truncate_prune(conversations)
61
+ elif strategy.name == "hybrid":
62
+ pruned = self._summarize_prune(conversations, strategy.config)
63
+ if count_tokens(json.dumps(pruned, ensure_ascii=False)) > strategy.config["safe_zone_tokens"]:
64
+ return self._truncate_prune(pruned)
65
+ return pruned
66
+ else:
67
+ logger.warning(f"Unknown strategy: {strategy_name}, using summarize instead")
68
+ return self._summarize_prune(conversations, strategy.config)
69
+
70
+ def _summarize_prune(self, conversations: List[Dict[str, Any]],
71
+ config: Dict[str, Any]) -> List[Dict[str, Any]]:
72
+ """摘要式剪枝"""
73
+ safe_zone_tokens = config.get("safe_zone_tokens", 50*1024)
74
+ group_size = config.get("group_size", 4)
75
+ processed_conversations = conversations.copy()
76
+
77
+ while True:
78
+ current_tokens = count_tokens(json.dumps(processed_conversations, ensure_ascii=False))
79
+ if current_tokens <= safe_zone_tokens:
80
+ break
81
+
82
+ # 找到要处理的对话组
83
+ early_conversations = processed_conversations[:-group_size]
84
+ recent_conversations = processed_conversations[-group_size:]
85
+
86
+ if not early_conversations:
87
+ break
88
+
89
+ # 生成当前组的摘要
90
+ group_summary = self._generate_summary.with_llm(self.llm).run(early_conversations[-group_size:])
91
+
92
+ # 更新对话历史
93
+ processed_conversations = early_conversations[:-group_size] + [
94
+ {"role": "user", "content": f"历史对话摘要:\n{group_summary}"},
95
+ {"role": "assistant", "content": f"收到"}
96
+ ] + recent_conversations
97
+
98
+ return processed_conversations
99
+
100
+ @byzerllm.prompt()
101
+ def _generate_summary(self, conversations: List[Dict[str, Any]]) -> str:
102
+ '''
103
+ 请用中文将以下对话浓缩为要点,保留关键决策和技术细节:
104
+
105
+ <history_conversations>
106
+ {{conversations}}
107
+ </history_conversations>
108
+ '''
109
+ return {
110
+ "conversations": json.dumps(conversations, ensure_ascii=False)
111
+ }
112
+
113
+ def _truncate_prune(self, conversations: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
114
+ """截断式剪枝"""
115
+ safe_zone_tokens = self.strategies["truncate"].config.get("safe_zone_tokens", 0)
116
+ group_size = self.strategies["truncate"].config.get("group_size", 4)
117
+ processed_conversations = conversations.copy()
118
+
119
+ while True:
120
+ current_tokens = count_tokens(json.dumps(processed_conversations, ensure_ascii=False))
121
+ if current_tokens <= safe_zone_tokens:
122
+ break
123
+
124
+ # 如果剩余对话不足一组,直接返回
125
+ if len(processed_conversations) <= group_size:
126
+ return []
127
+
128
+ # 移除最早的一组对话
129
+ processed_conversations = processed_conversations[group_size:]
130
+
131
+ return processed_conversations
@@ -0,0 +1,101 @@
1
+ import os
2
+ import json
3
+ import shutil
4
+ from loguru import logger
5
+ from autocoder.common.printer import Printer
6
+
7
+
8
+ def export_index(project_root: str, export_path: str) -> bool:
9
+ printer = Printer()
10
+ """
11
+ Export index.json with absolute paths converted to relative paths
12
+
13
+ Args:
14
+ project_root: Project root directory
15
+ export_path: Path to export the index file
16
+
17
+ Returns:
18
+ bool: True if successful, False otherwise
19
+ """
20
+ try:
21
+ index_path = os.path.join(project_root, ".auto-coder", "index.json")
22
+ if not os.path.exists(index_path):
23
+ printer.print_in_terminal("index_not_found", path=index_path)
24
+ return False
25
+
26
+ # Read and convert paths
27
+ with open(index_path, "r") as f:
28
+ index_data = json.load(f)
29
+
30
+ # Convert absolute paths to relative
31
+ converted_data = {}
32
+ for abs_path, data in index_data.items():
33
+ try:
34
+ rel_path = os.path.relpath(abs_path, project_root)
35
+ data["module_name"] = rel_path
36
+ converted_data[rel_path] = data
37
+ except ValueError:
38
+ printer.print_in_terminal("index_convert_path_fail", path=abs_path)
39
+ converted_data[abs_path] = data
40
+
41
+ # Write to export location
42
+ export_file = os.path.join(export_path, "index.json")
43
+ os.makedirs(export_path, exist_ok=True)
44
+ with open(export_file, "w") as f:
45
+ json.dump(converted_data, f, indent=2)
46
+
47
+ return True
48
+
49
+ except Exception as e:
50
+ printer.print_in_terminal("index_error", error=str(e))
51
+ return False
52
+
53
+ def import_index(project_root: str, import_path: str) -> bool:
54
+ printer = Printer()
55
+ """
56
+ Import index.json with relative paths converted to absolute paths
57
+
58
+ Args:
59
+ project_root: Project root directory
60
+ import_path: Path containing the index file to import
61
+
62
+ Returns:
63
+ bool: True if successful, False otherwise
64
+ """
65
+ try:
66
+ import_file = os.path.join(import_path, "index.json")
67
+ if not os.path.exists(import_file):
68
+ printer.print_in_terminal("index_not_found", path=import_file)
69
+ return False
70
+
71
+ # Read and convert paths
72
+ with open(import_file, "r") as f:
73
+ index_data = json.load(f)
74
+
75
+ # Convert relative paths to absolute
76
+ converted_data = {}
77
+ for rel_path, data in index_data.items():
78
+ try:
79
+ abs_path = os.path.join(project_root, rel_path)
80
+ data["module_name"] = abs_path
81
+ converted_data[abs_path] = data
82
+ except Exception:
83
+ printer.print_in_terminal("index_convert_path_fail", path=rel_path)
84
+ converted_data[rel_path] = data
85
+
86
+ # Backup existing index
87
+ index_path = os.path.join(project_root, ".auto-coder", "index.json")
88
+ if os.path.exists(index_path):
89
+ backup_path = index_path + ".bak"
90
+ shutil.copy2(index_path, backup_path)
91
+ printer.print_in_terminal("index_backup_success", path=backup_path)
92
+
93
+ # Write new index
94
+ with open(index_path, "w") as f:
95
+ json.dump(converted_data, f, indent=2)
96
+
97
+ return True
98
+
99
+ except Exception as e:
100
+ printer.print_in_terminal("index_error", error=str(e))
101
+ return False