auto-coder 0.1.264__py3-none-any.whl → 0.1.265__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of auto-coder might be problematic.
- {auto_coder-0.1.264.dist-info → auto_coder-0.1.265.dist-info}/METADATA +1 -1
- {auto_coder-0.1.264.dist-info → auto_coder-0.1.265.dist-info}/RECORD +50 -48
- autocoder/agent/planner.py +4 -4
- autocoder/auto_coder.py +26 -21
- autocoder/auto_coder_server.py +7 -7
- autocoder/chat_auto_coder.py +150 -49
- autocoder/commands/auto_command.py +81 -4
- autocoder/commands/tools.py +48 -50
- autocoder/common/__init__.py +0 -1
- autocoder/common/auto_coder_lang.py +37 -3
- autocoder/common/code_auto_generate.py +3 -3
- autocoder/common/code_auto_generate_diff.py +3 -6
- autocoder/common/code_auto_generate_editblock.py +3 -3
- autocoder/common/code_auto_generate_strict_diff.py +3 -3
- autocoder/common/code_auto_merge_diff.py +2 -2
- autocoder/common/code_auto_merge_editblock.py +1 -1
- autocoder/common/code_auto_merge_strict_diff.py +3 -3
- autocoder/common/command_completer.py +3 -0
- autocoder/common/command_generator.py +24 -8
- autocoder/common/command_templates.py +2 -2
- autocoder/common/conf_import_export.py +105 -0
- autocoder/common/conf_validator.py +1 -1
- autocoder/common/files.py +41 -2
- autocoder/common/image_to_page.py +11 -11
- autocoder/common/index_import_export.py +38 -18
- autocoder/common/mcp_hub.py +3 -3
- autocoder/common/mcp_server.py +2 -2
- autocoder/common/shells.py +254 -13
- autocoder/common/stats_panel.py +126 -0
- autocoder/dispacher/actions/action.py +6 -18
- autocoder/dispacher/actions/copilot.py +2 -2
- autocoder/dispacher/actions/plugins/action_regex_project.py +1 -3
- autocoder/dispacher/actions/plugins/action_translate.py +1 -1
- autocoder/index/index.py +5 -5
- autocoder/models.py +2 -2
- autocoder/pyproject/__init__.py +5 -5
- autocoder/rag/cache/byzer_storage_cache.py +4 -4
- autocoder/rag/cache/file_monitor_cache.py +2 -2
- autocoder/rag/cache/simple_cache.py +4 -4
- autocoder/rag/long_context_rag.py +2 -2
- autocoder/regexproject/__init__.py +3 -2
- autocoder/suffixproject/__init__.py +3 -2
- autocoder/tsproject/__init__.py +3 -2
- autocoder/utils/conversation_store.py +1 -1
- autocoder/utils/operate_config_api.py +3 -3
- autocoder/version.py +1 -1
- {auto_coder-0.1.264.dist-info → auto_coder-0.1.265.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.264.dist-info → auto_coder-0.1.265.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.264.dist-info → auto_coder-0.1.265.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.264.dist-info → auto_coder-0.1.265.dist-info}/top_level.txt +0 -0
autocoder/common/stats_panel.py
ADDED

@@ -0,0 +1,126 @@
+
+
+
+
+
+
+
+
+from rich.console import Console
+from rich.panel import Panel
+from rich.columns import Columns
+from rich.text import Text
+import math
+
+class StatsPanel:
+    def __init__(self, console: Console = None):
+        self.console = console if console else Console()
+
+    def _format_speed_bar(self, speed: float) -> Text:
+        """生成速度可视化进度条(保持原30-60区间)"""
+        if speed < 30:
+            color = "red"
+            level = "低"
+        elif 30 <= speed < 60:
+            color = "yellow"
+            level = "中"
+        else:
+            color = "green"
+            level = "高"
+
+        bar_length = min(int(speed), 100)
+        bar = Text("▮" * bar_length, style=color)
+        bar.append(f" {speed:.1f} tokens/s ({level})", style="bold white")
+        return bar
+
+    def _format_progress_bar(self, value: int, max_value: int, label: str, color: str) -> Text:
+        """生成通用进度条"""
+        progress = min(value / max_value, 1.0)
+        bar_length = int(progress * 20)
+        bar = Text("▮" * bar_length, style=color)
+        bar.append(f" {value} ({label})", style="bold white")
+        return bar
+
+    def generate(
+        self,
+        model_names: str,
+        duration: float,
+        sampling_count: int,
+        input_tokens: int,
+        output_tokens: int,
+        input_cost: float,
+        output_cost: float,
+        speed: float,
+    ) -> None:
+        """新版紧凑布局"""
+        # 复合标题(带图标和关键数据)
+        title = Text.assemble(
+            "📊 ", ("代码生成统计", "bold cyan underline"),
+            " │ ⚡", (f"{speed:.1f}t/s ", "bold green"),
+            "│ 💰", (f"${input_cost + output_cost:.4f}", "bold yellow")
+        )
+
+        # 处理耗时颜色逻辑(新增15-30-60区间)
+        duration_color = "green"
+        if 15 <= duration < 30:
+            duration_color = "yellow"
+        elif duration >= 30:
+            duration_color = "red"
+
+        # 处理成本颜色逻辑(新增0.5-1区间)
+        def get_cost_color(cost: float) -> str:
+            if cost < 0.5: return "green"
+            elif 0.5 <= cost < 1: return "yellow"
+            else: return "red"
+
+        # 紧凑网格布局
+        grid = [
+            Panel(
+                Text.assemble(
+                    ("🤖 模型: ", "bold"), model_names + "\n",
+                    self._format_mini_progress(duration, 60.0, duration_color),  # 耗时max=60
+                    (" ⏱", duration_color), f" {duration:.1f}s │ ",
+                    self._format_mini_progress(sampling_count, 100, "blue"),
+                    (" 🔢", "blue"), f" {sampling_count}\n",
+                    ("📥", "green"), " ",
+                    self._format_mini_progress(input_tokens, 65536.0, "green"),  # token分母改为65536
+                    f" {input_tokens} ({input_tokens/65536*100:.2f}%) │ ",  # 新增百分比显示
+                    ("📤", "bright_green"), " ",
+                    self._format_mini_progress(output_tokens, 65536.0, "bright_green"),
+                    f" {output_tokens} ({output_tokens/65536*100:.2f}%)"  # 新增百分比显示
+                ),
+                border_style="cyan",
+                padding=(0, 2)
+            ),
+            Panel(
+                Text.assemble(
+                    ("💵 成本: ", "bold"),
+                    self._format_mini_progress(input_cost, 1.0, get_cost_color(input_cost)),  # 成本max=1
+                    (" IN", get_cost_color(input_cost)), f" {input_cost:.3f}\n",
+                    ("💸 ", "bold"),
+                    self._format_mini_progress(output_cost, 1.0, get_cost_color(output_cost)),
+                    (" OUT", get_cost_color(output_cost)), f" {output_cost:.3f}\n",
+                    self._format_speed_bar(speed)
+                ),
+                border_style="yellow",
+                padding=(0, 1)
+            )
+        ]
+
+        # 组合布局
+        main_panel = Panel(
+            Columns(grid, equal=True, expand=True),
+            title=title,
+            border_style="bright_blue",
+            padding=(1, 2)
+        )
+
+        self.console.print(main_panel)
+
+
+    def _format_mini_progress(self, value: float, max_value: float, color: str) -> Text:
+        """紧凑型进度条(支持浮点数)"""
+        progress = min(value / max_value, 1.0)
+        filled = "▮" * int(progress * 10)
+        empty = "▯" * (10 - len(filled))
+        return Text(filled + empty, style=color)
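The new StatsPanel renders a compact two-column Rich panel combining model, latency, sampling, token, and cost figures. A minimal usage sketch follows; the import path matches the new module above, while the model name and every number are made-up illustration values:

```python
from autocoder.common.stats_panel import StatsPanel

# Illustrative values only; in auto-coder these come from an actual generation run.
panel = StatsPanel()
panel.generate(
    model_names="deepseek-chat",  # illustrative model name
    duration=12.3,        # seconds; < 15s keeps the duration bar green
    sampling_count=1,
    input_tokens=8192,    # displayed as a percentage of the 65536 denominator
    output_tokens=2048,
    input_cost=0.012,     # < 0.5 keeps the cost bar green
    output_cost=0.004,
    speed=55.0,           # 30-60 tokens/s renders the speed bar yellow
)
```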
autocoder/dispacher/actions/action.py
CHANGED

@@ -86,7 +86,7 @@ class ActionTSProject(BaseAction):
             max_iter=self.args.image_max_iter,
         )
         html_code = ""
-        with open(html_path, "r") as f:
+        with open(html_path, "r",encoding="utf-8") as f:
            html_code = f.read()

         source_code_list.sources.append(SourceCode(
@@ -190,9 +190,7 @@ class ActionTSProject(BaseAction):
             conversations=generate_result.conversations[0],
             model=self.llm.default_model_name,
         )
-
-        with open(args.target_file, "w") as file:
-            file.write(content)
+


 class ActionPyScriptProject(BaseAction):
@@ -300,11 +298,7 @@ class ActionPyScriptProject(BaseAction):
             instruction=self.args.query,
             conversations=generate_result.conversations[0],
             model=self.llm.default_model_name,
-        )
-
-        end_time = time.time()
-        with open(self.args.target_file, "w") as file:
-            file.write(content)
+        )


 class ActionPyProject(BaseAction):
@@ -435,9 +429,7 @@ class ActionPyProject(BaseAction):
             instruction=self.args.query,
             conversations=generate_result.conversations[0],
             model=self.llm.default_model_name,
-        )
-        with open(args.target_file, "w") as file:
-            file.write(content)
+        )


 class ActionSuffixProject(BaseAction):
@@ -551,9 +543,7 @@ class ActionSuffixProject(BaseAction):
                instruction=self.args.query,
                conversations=merge_result.conversations[0],
                model=self.llm.default_model_name,
-            )
-            with open(args.target_file, "w") as file:
-                file.write(content)
+            )
         else:
             content = generate_result.contents[0]

@@ -563,7 +553,5 @@ class ActionSuffixProject(BaseAction):
             conversations=generate_result.conversations[0],
             model=self.llm.default_model_name,
         )
-
-        with open(args.target_file, "w") as file:
-            file.write(content)
+


autocoder/dispacher/actions/copilot.py
CHANGED

@@ -343,7 +343,7 @@ class ActionCopilot:
         logger.info(
             "model is not specified and we will generate prompt to the target file"
         )
-        with open(args.target_file, "w") as f:
+        with open(args.target_file, "w",encoding="utf-8") as f:
            f.write(q)
         return True

@@ -379,7 +379,7 @@ class ActionCopilot:
         logger.info(result)

         # 将结果写入文件
-        with open(args.target_file, "w") as f:
+        with open(args.target_file, "w",encoding="utf-8") as f:
            f.write("=================CONVERSATION==================\n\n")
            for conversation in conversations:
                f.write(f"{conversation['role']}: {conversation['content']}\n")

autocoder/dispacher/actions/plugins/action_translate.py
CHANGED

@@ -209,6 +209,6 @@ class ActionTranslate:
         new_filename = f"{filename}{new_file_mark}{extension}"

         logger.info(f"Writing to {new_filename}...")
-        with open(new_filename, "w") as file:
+        with open(new_filename, "w",encoding="utf-8") as file:
            file.write(readme.content)
         return True
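A pattern repeated across almost every hunk in this release is adding an explicit encoding="utf-8" to open() calls. Without that argument Python falls back to the locale's preferred encoding, which on many Windows machines is not UTF-8, so prompts, indexes, and caches containing Chinese text can raise UnicodeEncodeError/UnicodeDecodeError or come back as mojibake. A small standalone illustration of the failure mode this avoids (not auto-coder code):

```python
import locale
import os
import tempfile

print(locale.getpreferredencoding(False))  # e.g. "cp1252" on many Windows setups

path = os.path.join(tempfile.gettempdir(), "demo.txt")

# Explicit encoding behaves the same on every platform.
with open(path, "w", encoding="utf-8") as f:
    f.write("代码生成统计")  # Chinese text like auto-coder's UI strings and prompts

# Without encoding="utf-8", the write above raises UnicodeEncodeError on a cp1252
# locale, and reading the file back can raise UnicodeDecodeError or produce mojibake.
with open(path, "r", encoding="utf-8") as f:
    print(f.read())
```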
autocoder/index/index.py
CHANGED

@@ -342,7 +342,7 @@ class IndexManager:

     def build_index(self):
         if os.path.exists(self.index_file):
-            with open(self.index_file, "r") as file:
+            with open(self.index_file, "r",encoding="utf-8") as file:
                 index_data = json.load(file)
         else:
             index_data = {}
@@ -433,13 +433,13 @@ class IndexManager:
                 index_data[module_name] = result
                 updated_sources.append(module_name)
                 if len(updated_sources) > 5:
-                    with open(self.index_file, "w") as file:
+                    with open(self.index_file, "w",encoding="utf-8") as file:
                         json.dump(index_data, file, ensure_ascii=False, indent=2)
                     updated_sources = []

         # 如果 updated_sources 或 keys_to_remove 有值,则保存索引文件
         if updated_sources or keys_to_remove:
-            with open(self.index_file, "w") as file:
+            with open(self.index_file, "w",encoding="utf-8") as file:
                 json.dump(index_data, file, ensure_ascii=False, indent=2)

         print("")
@@ -461,14 +461,14 @@ class IndexManager:
         if not os.path.exists(self.index_file):
             return []

-        with open(self.index_file, "r") as file:
+        with open(self.index_file, "r",encoding="utf-8") as file:
             return file.read()

     def read_index(self) -> List[IndexItem]:
         if not os.path.exists(self.index_file):
             return []

-        with open(self.index_file, "r") as file:
+        with open(self.index_file, "r",encoding="utf-8") as file:
             index_data = json.load(file)

         index_items = []
autocoder/models.py
CHANGED

@@ -97,7 +97,7 @@ def load_models() -> List[Dict]:
         if model.get("api_key_path",""):
             api_key_file = os.path.join(api_key_dir, model["api_key_path"])
             if os.path.exists(api_key_file):
-                with open(api_key_file, "r") as f:
+                with open(api_key_file, "r",encoding="utf-8") as f:
                     model["api_key"] = f.read()
     return target_models

@@ -269,7 +269,7 @@ def update_model_with_api_key(name: str, api_key: str) -> Dict:
     api_key_dir = os.path.expanduser("~/.auto-coder/keys")
     os.makedirs(api_key_dir, exist_ok=True)
     api_key_file = os.path.join(api_key_dir, api_key_path)
-    with open(api_key_file, "w") as f:
+    with open(api_key_file, "w",encoding="utf-8") as f:
         f.write(api_key.strip())

     # 如果是新模型,添加到模型列表中
autocoder/pyproject/__init__.py
CHANGED

@@ -174,7 +174,8 @@ class PyProject:
         return False

     def output(self):
-
+        with open(self.target_file, "r",encoding="utf-8") as file:
+            return file.read()

     def is_python_file(self, file_path):
         return file_path.endswith(".py")
@@ -182,9 +183,8 @@ class PyProject:
     def read_file_content(self, file_path):
         if self.args.auto_merge == "strict_diff":
             result = []
-
-
-            result.append(f"{line_number}:{line}")
+            for line_number, line in FileUtils.read_file_with_line_numbers(file_path,line_number_start=1):
+                result.append(f"{line_number}:{line}")
             return "\n".join(result)

         return FileUtils.read_file(file_path)
@@ -357,7 +357,7 @@ class PyProject:
             self.clone_repository()

         if self.target_file:
-            with open(self.target_file, "w") as file:
+            with open(self.target_file, "w",encoding="utf-8") as file:

                 for code in self.get_rest_source_codes():
                     self.sources.append(code)
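In the strict_diff branch of read_file_content, the inline numbering code is replaced by a call to FileUtils.read_file_with_line_numbers. The real helper lives in auto-coder's FileUtils; the stand-in below is only a plausible sketch of its contract, shown with the "N:line" output format the new code builds from it:

```python
from typing import Iterator, Tuple

def read_file_with_line_numbers(file_path: str, line_number_start: int = 1) -> Iterator[Tuple[int, str]]:
    """Hypothetical stand-in for FileUtils.read_file_with_line_numbers."""
    with open(file_path, "r", encoding="utf-8") as f:
        for offset, line in enumerate(f.read().splitlines()):
            yield line_number_start + offset, line

def numbered_content(file_path: str) -> str:
    """Mirrors the new read_file_content body for the strict_diff merge mode."""
    result = []
    for line_number, line in read_file_with_line_numbers(file_path, line_number_start=1):
        result.append(f"{line_number}:{line}")
    return "\n".join(result)
```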
autocoder/rag/cache/byzer_storage_cache.py
CHANGED

@@ -126,7 +126,7 @@ class ByzerStorageCache(BaseCacheManager):
         """Load cache from file"""
         if os.path.exists(self.cache_file):
             try:
-                with open(self.cache_file, "r") as f:
+                with open(self.cache_file, "r",encoding="utf-8") as f:
                     lines = f.readlines()
                     cache = {}
                     for line in lines:
@@ -147,7 +147,7 @@ class ByzerStorageCache(BaseCacheManager):

         if not fcntl:
             try:
-                with open(cache_file, "w") as f:
+                with open(cache_file, "w",encoding="utf-8") as f:
                     for data in self.cache.values():
                         json.dump(data, f, ensure_ascii=False)
                         f.write("\n")
@@ -155,12 +155,12 @@ class ByzerStorageCache(BaseCacheManager):
                 logger.error(f"Error writing cache file: {str(e)}")
         else:
             lock_file = cache_file + ".lock"
-            with open(lock_file, "w") as lockf:
+            with open(lock_file, "w",encoding="utf-8") as lockf:
                 try:
                     # 获取文件锁
                     fcntl.flock(lockf, fcntl.LOCK_EX | fcntl.LOCK_NB)
                     # 写入缓存文件
-                    with open(cache_file, "w") as f:
+                    with open(cache_file, "w",encoding="utf-8") as f:
                         for data in self.cache.values():
                             json.dump(data, f, ensure_ascii=False)
                             f.write("\n")
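When fcntl is available, the cache writers take an exclusive, non-blocking advisory lock on a sibling .lock file before rewriting the JSONL cache; when it is not (for example on Windows), they write without locking. A condensed sketch of that pattern, independent of the cache classes above (the explicit unlock is added here for clarity):

```python
import json

try:
    import fcntl  # POSIX only; the except branch mirrors the "if not fcntl" path above
except ImportError:
    fcntl = None

def save_jsonl(cache_file: str, records: dict) -> None:
    def _write() -> None:
        with open(cache_file, "w", encoding="utf-8") as f:
            for data in records.values():
                json.dump(data, f, ensure_ascii=False)
                f.write("\n")

    if not fcntl:
        _write()  # no advisory locking available on this platform
        return

    with open(cache_file + ".lock", "w", encoding="utf-8") as lockf:
        # Exclusive, non-blocking lock: raises BlockingIOError if another writer holds it.
        fcntl.flock(lockf, fcntl.LOCK_EX | fcntl.LOCK_NB)
        try:
            _write()
        finally:
            fcntl.flock(lockf, fcntl.LOCK_UN)
```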
autocoder/rag/cache/file_monitor_cache.py
CHANGED

@@ -106,11 +106,11 @@ class AutoCoderRAGDocListener(BaseCacheManager):
         gitignore_path = os.path.join(self.path, ".gitignore")

         if os.path.exists(serveignore_path):
-            with open(serveignore_path, "r") as ignore_file:
+            with open(serveignore_path, "r",encoding="utf-8") as ignore_file:
                 patterns = ignore_file.readlines()
                 return [pattern.strip() for pattern in patterns]
         elif os.path.exists(gitignore_path):
-            with open(gitignore_path, "r") as ignore_file:
+            with open(gitignore_path, "r",encoding="utf-8") as ignore_file:
                 patterns = ignore_file.readlines()
                 return [pattern.strip() for pattern in patterns]
         return []
autocoder/rag/cache/simple_cache.py
CHANGED

@@ -160,7 +160,7 @@ class AutoCoderRAGAsyncUpdateQueue(BaseCacheManager):

         cache = {}
         if os.path.exists(cache_file):
-            with open(cache_file, "r") as f:
+            with open(cache_file, "r",encoding="utf-8") as f:
                 for line in f:
                     data = json.loads(line)
                     cache[data["file_path"]] = data
@@ -171,7 +171,7 @@ class AutoCoderRAGAsyncUpdateQueue(BaseCacheManager):
         cache_file = os.path.join(cache_dir, "cache.jsonl")

         if not fcntl:
-            with open(cache_file, "w") as f:
+            with open(cache_file, "w",encoding="utf-8") as f:
                 for data in self.cache.values():
                     try:
                         json.dump(data, f, ensure_ascii=False)
@@ -181,12 +181,12 @@ class AutoCoderRAGAsyncUpdateQueue(BaseCacheManager):
                             f"Failed to write {data['file_path']} to .cache/cache.jsonl: {e}")
         else:
             lock_file = cache_file + ".lock"
-            with open(lock_file, "w") as lockf:
+            with open(lock_file, "w",encoding="utf-8") as lockf:
                 try:
                     # 获取文件锁
                     fcntl.flock(lockf, fcntl.LOCK_EX | fcntl.LOCK_NB)
                     # 写入缓存文件
-                    with open(cache_file, "w") as f:
+                    with open(cache_file, "w",encoding="utf-8") as f:
                         for data in self.cache.values():
                             try:
                                 json.dump(data, f, ensure_ascii=False)
autocoder/rag/long_context_rag.py
CHANGED

@@ -245,10 +245,10 @@ class LongContextRAG:
         gitignore_path = os.path.join(self.path, ".gitignore")

         if os.path.exists(serveignore_path):
-            with open(serveignore_path, "r") as ignore_file:
+            with open(serveignore_path, "r",encoding="utf-8") as ignore_file:
                 return pathspec.PathSpec.from_lines("gitwildmatch", ignore_file)
         elif os.path.exists(gitignore_path):
-            with open(gitignore_path, "r") as ignore_file:
+            with open(gitignore_path, "r",encoding="utf-8") as ignore_file:
                 return pathspec.PathSpec.from_lines("gitwildmatch", ignore_file)
         return None

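Both ignore-file readers feed the patterns into pathspec's gitignore-style ("gitwildmatch") matcher. A short self-contained example of what such a spec matches; the patterns are illustrative, not auto-coder defaults:

```python
import pathspec

patterns = [".cache/", "*.pyc", "node_modules/"]  # example patterns only
spec = pathspec.PathSpec.from_lines("gitwildmatch", patterns)

print(spec.match_file("src/main.py"))          # False
print(spec.match_file("src/util.pyc"))         # True
print(spec.match_file("node_modules/a/b.js"))  # True
```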
autocoder/regexproject/__init__.py
CHANGED

@@ -78,7 +78,8 @@ class RegexProject:
             raise ValueError("Invalid project_type format. Expected 'regex//<pattern>'")

     def output(self):
-
+        with open(self.target_file, "r",encoding="utf-8") as file:
+            return file.read()

     def is_regex_match(self, file_path):
         return re.search(self.regex_pattern, file_path) is not None
@@ -231,7 +232,7 @@ class RegexProject:
         self.clone_repository()

         if self.target_file:
-            with open(self.target_file, "w") as file:
+            with open(self.target_file, "w",encoding="utf-8") as file:
                 for code in self.get_source_codes():
                     self.sources.append(code)
                     file.write(f"##File: {code.module_name}\n")
autocoder/suffixproject/__init__.py
CHANGED

@@ -114,7 +114,8 @@ class SuffixProject:
         return False

     def output(self):
-
+        with open(self.target_file, "r",encoding="utf-8") as file:
+            return file.read()

     def is_suffix_file(self, file_path):
         return any([file_path.endswith(suffix) for suffix in self.suffixs])
@@ -273,7 +274,7 @@ class SuffixProject:
         self.clone_repository()

         if self.target_file:
-            with open(self.target_file, "w") as file:
+            with open(self.target_file, "w",encoding="utf-8") as file:
                 for code in self.get_source_codes():
                     self.sources.append(code)
                     file.write(f"##File: {code.module_name}\n")
autocoder/tsproject/__init__.py
CHANGED

@@ -106,7 +106,8 @@ class TSProject:
         return False

     def output(self):
-
+        with open(self.target_file, "r",encoding="utf-8") as file:
+            return file.read()

     def read_file_content(self, file_path):
         return FileUtils.read_file(file_path)
@@ -308,7 +309,7 @@ class TSProject:
         self.clone_repository()

         if self.target_file:
-            with open(self.target_file, "w") as file:
+            with open(self.target_file, "w",encoding="utf-8") as file:
                 for code in self.get_rest_source_codes():
                     self.sources.append(code)
                     file.write(f"##File: {code.module_name}\n")
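PyProject, RegexProject, SuffixProject, and TSProject all gain the same two-line output() body: read the already-written target_file back as UTF-8 and return its text (the action.py hunks earlier in this diff drop several direct target_file writes in the same release). Expressed as a standalone helper, the new behavior is simply:

```python
def project_output(target_file: str) -> str:
    """What the new output() methods do: return the packed context written earlier."""
    with open(target_file, "r", encoding="utf-8") as file:
        return file.read()
```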
autocoder/utils/conversation_store.py
CHANGED

@@ -34,7 +34,7 @@ def load_code_model_conversation_from_store(args: AutoCoderArgs):
         return []

     conversations = []
-    with open(conversation_file, "r") as f:
+    with open(conversation_file, "r",encoding="utf-8") as f:
         for line in f:
             conversations.append(json.loads(line))

autocoder/utils/operate_config_api.py
CHANGED

@@ -15,7 +15,7 @@ import hashlib
 def convert_yaml_to_config(yaml_file: str):

     args = AutoCoderArgs()
-    with open(yaml_file, "r") as f:
+    with open(yaml_file, "r",encoding="utf-8") as f:
         config = yaml.safe_load(f)
         config = load_include_files(config, yaml_file)
         for key, value in config.items():
@@ -75,7 +75,7 @@ def get_llm_friendly_package_docs(memory,
         if return_paths:
             docs.append(file_path)
         else:
-            with open(file_path, "r") as f:
+            with open(file_path, "r",encoding="utf-8") as f:
                 docs.append(f.read())

     return docs
@@ -130,7 +130,7 @@ def get_llm(memory, model:Optional[str]=None):
     # 临时保存yaml文件,然后读取yaml文件,转换为args
     temp_yaml = os.path.join("actions", f"{uuid.uuid4()}.yml")
     try:
-        with open(temp_yaml, "w") as f:
+        with open(temp_yaml, "w",encoding="utf-8") as f:
             f.write(convert_yaml_config_to_str(
                 yaml_config=yaml_config))
         args = convert_yaml_to_config(temp_yaml)
autocoder/version.py
CHANGED

@@ -1 +1 @@
-__version__ = "0.1.264"
+__version__ = "0.1.265"
|