auto-coder 0.1.172__py3-none-any.whl → 0.1.175__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of auto-coder might be problematic; consult the registry's advisory page for more details.

@@ -18,7 +18,7 @@ from rich.console import Console
18
18
  from rich.table import Table
19
19
  import os
20
20
 
21
- from autocoder.rag.document_retriever import process_file3
21
+ from autocoder.rag.document_retriever import process_file_local
22
22
  from autocoder.rag.token_counter import TokenCounter
23
23
 
24
24
  if platform.system() == "Windows":
@@ -90,16 +90,24 @@ def initialize_system():
90
90
 
91
91
  if choice == "1":
92
92
  print_status(get_message("deploying_model").format("Deepseek官方"), "")
93
-
93
+
94
94
  deploy_cmd = [
95
- "byzerllm", "deploy",
96
- "--pretrained_model_type", "saas/openai",
97
- "--cpus_per_worker", "0.001",
98
- "--gpus_per_worker", "0",
99
- "--worker_concurrency", "1000",
100
- "--num_workers", "1",
101
- "--infer_params", f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-chat",
102
- "--model", "deepseek_chat"
95
+ "byzerllm",
96
+ "deploy",
97
+ "--pretrained_model_type",
98
+ "saas/openai",
99
+ "--cpus_per_worker",
100
+ "0.001",
101
+ "--gpus_per_worker",
102
+ "0",
103
+ "--worker_concurrency",
104
+ "1000",
105
+ "--num_workers",
106
+ "1",
107
+ "--infer_params",
108
+ f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-chat",
109
+ "--model",
110
+ "deepseek_chat",
103
111
  ]
104
112
 
105
113
  try:
@@ -138,7 +146,9 @@ def main(input_args: Optional[List[str]] = None):
138
146
 
139
147
  # Serve command
140
148
  serve_parser = subparsers.add_parser("serve", help="Start the RAG server")
141
- serve_parser.add_argument("--quick", action="store_true", help="Skip system initialization")
149
+ serve_parser.add_argument(
150
+ "--quick", action="store_true", help="Skip system initialization"
151
+ )
142
152
  serve_parser.add_argument("--file", default="", help=desc["file"])
143
153
  serve_parser.add_argument("--model", default="deepseek_chat", help=desc["model"])
144
154
  serve_parser.add_argument("--index_model", default="", help=desc["index_model"])
@@ -160,7 +170,19 @@ def main(input_args: Optional[List[str]] = None):
160
170
  "--rag_context_window_limit",
161
171
  type=int,
162
172
  default=110000,
163
- help="",
173
+ help="The input context window limit for RAG",
174
+ )
175
+ serve_parser.add_argument(
176
+ "--full_text_ratio",
177
+ type=float,
178
+ default=0.7,
179
+ help="The ratio of full text area in the input context window (0.0 to 1.0)",
180
+ )
181
+ serve_parser.add_argument(
182
+ "--segment_ratio",
183
+ type=float,
184
+ default=0.2,
185
+ help="The ratio of segment area in the input context window (0.0 to 1.0)",
164
186
  )
165
187
  serve_parser.add_argument(
166
188
  "--required_exts", default="", help=desc["doc_build_parse_required_exts"]
@@ -198,6 +220,12 @@ def main(input_args: Optional[List[str]] = None):
198
220
  help="Monitor mode for the doc update",
199
221
  )
200
222
 
223
+ serve_parser.add_argument(
224
+ "--disable_auto_window",
225
+ action="store_true",
226
+ help="Disable automatic window adaptation for documents",
227
+ )
228
+
201
229
  # Tools command
202
230
  tools_parser = subparsers.add_parser("tools", help="Various tools")
203
231
  tools_subparsers = tools_parser.add_subparsers(dest="tool", help="Available tools")
@@ -255,7 +283,7 @@ def main(input_args: Optional[List[str]] = None):
255
283
 
256
284
  def count_tokens(tokenizer_path: str, file_path: str):
257
285
  token_counter = TokenCounter(tokenizer_path)
258
- source_codes = process_file3(file_path)
286
+ source_codes = process_file_local(file_path)
259
287
 
260
288
  console = Console()
261
289
  table = Table(title="Token Count Results")
@@ -118,9 +118,66 @@ commands = [
118
118
  "/summon",
119
119
  "/mode",
120
120
  "/lib",
121
+ "/design",
121
122
  ]
122
123
 
123
124
 
125
+ def show_help():
126
+ print(f"\033[1m{get_message('supported_commands')}\033[0m")
127
+ print()
128
+ print(
129
+ f" \033[94m{get_message('commands')}\033[0m - \033[93m{get_message('description')}\033[0m"
130
+ )
131
+ print(
132
+ f" \033[94m/add_files\033[0m \033[93m<file1> <file2> ...\033[0m - \033[92m{get_message('add_files_desc')}\033[0m"
133
+ )
134
+ print(
135
+ f" \033[94m/remove_files\033[0m \033[93m<file1>,<file2> ...\033[0m - \033[92m{get_message('remove_files_desc')}\033[0m"
136
+ )
137
+ print(
138
+ f" \033[94m/chat\033[0m \033[93m<query>\033[0m - \033[92m{get_message('chat_desc')}\033[0m"
139
+ )
140
+ print(
141
+ f" \033[94m/coding\033[0m \033[93m<query>\033[0m - \033[92m{get_message('coding_desc')}\033[0m"
142
+ )
143
+ print(
144
+ f" \033[94m/design\033[0m \033[93m<query>\033[0m - \033[92m{get_message('design_desc')}\033[0m"
145
+ )
146
+ print(
147
+ f" \033[94m/ask\033[0m \033[93m<query>\033[0m - \033[92m{get_message('ask_desc')}\033[0m"
148
+ )
149
+ print(
150
+ f" \033[94m/summon\033[0m \033[93m<query>\033[0m - \033[92m{get_message('summon_desc')}\033[0m"
151
+ )
152
+ print(f" \033[94m/revert\033[0m - \033[92m{get_message('revert_desc')}\033[0m")
153
+ print(
154
+ f" \033[94m/conf\033[0m \033[93m<key>:<value>\033[0m - \033[92m{get_message('conf_desc')}\033[0m"
155
+ )
156
+ print(
157
+ f" \033[94m/index/query\033[0m \033[93m<args>\033[0m - \033[92m{get_message('index_query_desc')}\033[0m"
158
+ )
159
+ print(
160
+ f" \033[94m/index/build\033[0m - \033[92m{get_message('index_build_desc')}\033[0m"
161
+ )
162
+ print(
163
+ f" \033[94m/list_files\033[0m - \033[92m{get_message('list_files_desc')}\033[0m"
164
+ )
165
+ print(f" \033[94m/help\033[0m - \033[92m{get_message('help_desc')}\033[0m")
166
+ print(
167
+ f" \033[94m/exclude_dirs\033[0m \033[93m<dir1>,<dir2> ...\033[0m - \033[92m{get_message('exclude_dirs_desc')}\033[0m"
168
+ )
169
+ print(
170
+ f" \033[94m/shell\033[0m \033[93m<command>\033[0m - \033[92m{get_message('shell_desc')}\033[0m"
171
+ )
172
+ print(
173
+ f" \033[94m/voice_input\033[0m - \033[92m{get_message('voice_input_desc')}\033[0m"
174
+ )
175
+ print(f" \033[94m/mode\033[0m - \033[92m{get_message('mode_desc')}\033[0m")
176
+ print(f" \033[94m/lib\033[0m - \033[92m{get_message('lib_desc')}\033[0m")
177
+ print(f" \033[94m/exit\033[0m - \033[92m{get_message('exit_desc')}\033[0m")
178
+ print()
179
+
180
+
124
181
  def configure_project_type():
125
182
  from prompt_toolkit.lexers import PygmentsLexer
126
183
  from pygments.lexers.markup import MarkdownLexer
@@ -280,14 +337,22 @@ def initialize_system():
280
337
  else:
281
338
  print_status(get_message("deploying_model").format("Deepseek官方"), "")
282
339
  deploy_cmd = [
283
- "byzerllm", "deploy",
284
- "--pretrained_model_type", "saas/openai",
285
- "--cpus_per_worker", "0.001",
286
- "--gpus_per_worker", "0",
287
- "--worker_concurrency", "1000",
288
- "--num_workers", "1",
289
- "--infer_params", f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-chat",
290
- "--model", "deepseek_chat"
340
+ "byzerllm",
341
+ "deploy",
342
+ "--pretrained_model_type",
343
+ "saas/openai",
344
+ "--cpus_per_worker",
345
+ "0.001",
346
+ "--gpus_per_worker",
347
+ "0",
348
+ "--worker_concurrency",
349
+ "1000",
350
+ "--num_workers",
351
+ "1",
352
+ "--infer_params",
353
+ f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-chat",
354
+ "--model",
355
+ "deepseek_chat",
291
356
  ]
292
357
 
293
358
  try:
@@ -716,7 +781,7 @@ class CommandCompleter(Completer):
716
781
 
717
782
  if current_word.startswith("@@"):
718
783
  name = current_word[2:]
719
- for symbol in self.symbol_list:
784
+ for symbol in self.symbol_list:
720
785
  if name in symbol.symbol_name:
721
786
  file_name = symbol.file_name
722
787
  path_parts = file_name.split(os.sep)
@@ -1060,7 +1125,7 @@ def add_files(args: List[str]):
1060
1125
 
1061
1126
  print_formatted_text(
1062
1127
  HTML(
1063
- "<b>Type Atom Group Desc (Prese [Esc] + [Enter] to finish.)</b><br>"
1128
+ "<b>Type Atom Group Desc (Prese [Esc] + [Enter] to finish.)</b><br/>"
1064
1129
  )
1065
1130
  )
1066
1131
  text = prompt(
@@ -1340,7 +1405,8 @@ def coding(query: str):
1340
1405
  "skip_build_index": conf.get("skip_build_index", "true") == "true",
1341
1406
  "skip_confirm": conf.get("skip_confirm", "true") == "true",
1342
1407
  "silence": conf.get("silence", "true") == "true",
1343
- "include_project_structure": conf.get("include_project_structure", "true") == "true",
1408
+ "include_project_structure": conf.get("include_project_structure", "true")
1409
+ == "true",
1344
1410
  }
1345
1411
 
1346
1412
  for key, value in conf.items():
@@ -1447,18 +1513,22 @@ def chat(query: str):
1447
1513
  file_contents = []
1448
1514
  for file in current_files:
1449
1515
  if os.path.exists(file):
1450
- with open(file, "r") as f:
1451
- content = f.read()
1452
- s = f"##File: {file}\n{content}\n\n"
1453
- file_contents.append(s)
1516
+ try:
1517
+ with open(file, "r") as f:
1518
+ content = f.read()
1519
+ s = f"##File: {file}\n{content}\n\n"
1520
+ file_contents.append(s)
1521
+ except Exception as e:
1522
+ print(f"Failed to read file: {file}. Error: {str(e)}")
1454
1523
 
1455
1524
  all_file_content = "".join(file_contents)
1456
1525
 
1457
1526
  yaml_config = {
1458
1527
  "include_file": ["./base/base.yml"],
1459
- "include_project_structure": conf.get("include_project_structure", "true") == "true",
1528
+ "include_project_structure": conf.get("include_project_structure", "true")
1529
+ == "true",
1460
1530
  }
1461
-
1531
+
1462
1532
  yaml_config["context"] = json.dumps(
1463
1533
  {"file_content": all_file_content}, ensure_ascii=False
1464
1534
  )
@@ -1517,10 +1587,13 @@ def summon(query: str):
1517
1587
  file_contents = []
1518
1588
  for file in current_files:
1519
1589
  if os.path.exists(file):
1520
- with open(file, "r") as f:
1521
- content = f.read()
1522
- s = f"##File: {file}\n{content}\n\n"
1523
- file_contents.append(s)
1590
+ try:
1591
+ with open(file, "r") as f:
1592
+ content = f.read()
1593
+ s = f"##File: {file}\n{content}\n\n"
1594
+ file_contents.append(s)
1595
+ except Exception as e:
1596
+ print(f"Failed to read file: {file}. Error: {str(e)}")
1524
1597
 
1525
1598
  all_file_content = "".join(file_contents)
1526
1599
 
@@ -1560,6 +1633,49 @@ def summon(query: str):
1560
1633
  os.remove(execute_file)
1561
1634
 
1562
1635
 
1636
+ def design(query: str):
1637
+
1638
+ conf = memory.get("conf", {})
1639
+ yaml_config = {
1640
+ "include_file": ["./base/base.yml"],
1641
+ }
1642
+
1643
+ if query.strip().startswith("/svg"):
1644
+ query = query.replace("/svg", "", 1).strip()
1645
+ yaml_config["agent_designer_mode"] = "svg"
1646
+ elif query.strip().startswith("/sd"):
1647
+ query = query.replace("/svg", "", 1).strip()
1648
+ yaml_config["agent_designer_mode"] = "sd"
1649
+ else:
1650
+ yaml_config["agent_designer_mode"] = "svg"
1651
+
1652
+ yaml_config["query"] = query
1653
+
1654
+ if "model" in conf:
1655
+ yaml_config["model"] = conf["model"]
1656
+
1657
+ if "designer_model" in conf:
1658
+ yaml_config["designer_model"] = conf["designer_model"]
1659
+
1660
+ if "sd_model" in conf:
1661
+ yaml_config["sd_model"] = conf["sd_model"]
1662
+
1663
+ yaml_content = convert_yaml_config_to_str(yaml_config=yaml_config)
1664
+
1665
+ execute_file = os.path.join("actions", f"{uuid.uuid4()}.yml")
1666
+
1667
+ with open(os.path.join(execute_file), "w") as f:
1668
+ f.write(yaml_content)
1669
+
1670
+ def execute_design():
1671
+ auto_coder_main(["agent", "designer", "--file", execute_file])
1672
+
1673
+ try:
1674
+ execute_design()
1675
+ finally:
1676
+ os.remove(execute_file)
1677
+
1678
+
1563
1679
  def voice_input():
1564
1680
  conf = memory.get("conf", {})
1565
1681
  yaml_config = {
@@ -2096,6 +2212,13 @@ def main():
2096
2212
  else:
2097
2213
  chat(query)
2098
2214
 
2215
+ elif user_input.startswith("/design"):
2216
+ query = user_input[len("/design") :].strip()
2217
+ if not query:
2218
+ print("\033[91mPlease enter your design request.\033[0m")
2219
+ else:
2220
+ design(query)
2221
+
2099
2222
  elif user_input.startswith("/summon"):
2100
2223
  query = user_input[len("/summon") :].strip()
2101
2224
  if not query:
@@ -64,6 +64,7 @@ MESSAGES = {
64
64
  "mode_desc": "Switch input mode",
65
65
  "lib_desc": "Manage libraries",
66
66
  "exit_desc": "Exit the program",
67
+ "design_desc": "Generate SVG image based on the provided description",
67
68
  },
68
69
  "zh": {
69
70
  "initializing": "🚀 正在初始化系统...",
@@ -128,6 +129,8 @@ MESSAGES = {
128
129
  "mode_desc": "切换输入模式",
129
130
  "lib_desc": "管理库",
130
131
  "exit_desc": "退出程序",
132
+ "design_desc": "根据需求设计SVG图片",
133
+
131
134
  }
132
135
  }
133
136
 
autocoder/command_args.py CHANGED
@@ -103,8 +103,8 @@ def parse_args(input_args: Optional[List[str]] = None) -> AutoCoderArgs:
103
103
  parser.add_argument("--urls", default="", help=desc["urls"])
104
104
  parser.add_argument(
105
105
  "--urls_use_model", action="store_true", help=desc["urls_use_model"]
106
- )
107
- parser.add_argument("--exclude_files", default="", help="")
106
+ )
107
+ parser.add_argument("--designer_model", default="", help=desc["designer_model"])
108
108
  parser.add_argument("--query_prefix", default=None, help=desc["query_prefix"])
109
109
  parser.add_argument("--query_suffix", default=None, help=desc["query_suffix"])
110
110
 
@@ -546,6 +546,16 @@ def parse_args(input_args: Optional[List[str]] = None) -> AutoCoderArgs:
546
546
  )
547
547
  auto_tool_parser.add_argument("--target_file", default="./output.txt", help="")
548
548
 
549
+ designer_parser = agent_subparsers.add_parser(
550
+ "designer", help="Run the designer agent"
551
+ )
552
+ designer_parser.add_argument("--request_id", default="", help=desc["request_id"])
553
+ designer_parser.add_argument("--source_dir", default=".", help="Source directory")
554
+ designer_parser.add_argument("--query", help="Query for the designer")
555
+ designer_parser.add_argument("--model", default="", help=desc["model"])
556
+ designer_parser.add_argument("--file", default="", help=desc["file"])
557
+ designer_parser.add_argument("--ray_address", default="auto", help=desc["ray_address"])
558
+
549
559
  planner_parser = agent_subparsers.add_parser(
550
560
  "planner", help="Run the planner agent"
551
561
  )
@@ -11,6 +11,8 @@ class SourceCode(pydantic.BaseModel):
11
11
  module_name: str
12
12
  source_code: str
13
13
  tag: str = ""
14
+ tokens: int = -1
15
+ metadata: Dict[str, Any] = {}
14
16
 
15
17
 
16
18
  class TranslateReadme(pydantic.BaseModel):
@@ -246,6 +248,7 @@ class AutoCoderArgs(pydantic.BaseModel):
246
248
  index_build_workers: Optional[int] = 1
247
249
 
248
250
  planner_model: Optional[str] = ""
251
+ designer_model: Optional[str] = ""
249
252
  file: Optional[str] = ""
250
253
  ray_address: Optional[str] = ""
251
254
  anti_quota_limit: Optional[int] = 1
@@ -280,7 +283,8 @@ class AutoCoderArgs(pydantic.BaseModel):
280
283
  doc_command: Optional[str] = None
281
284
  required_exts: Optional[str] = None
282
285
 
283
- monitor_mode: Optional[bool] = False
286
+ monitor_mode: bool = False
287
+ disable_auto_window: bool = False
284
288
 
285
289
  description: Optional[str] = ""
286
290
  skip_confirm: Optional[bool] = False
@@ -301,5 +305,11 @@ class AutoCoderArgs(pydantic.BaseModel):
301
305
 
302
306
  prompt_review: Optional[str] = None
303
307
 
308
+ agent_designer_mode: Optional[str] = "svg"
309
+
310
+ full_text_ratio: Optional[float] = 0.7
311
+ segment_ratio: Optional[float] = 0.2
312
+ buff_ratio: Optional[float] = 0.1
313
+
304
314
  class Config:
305
315
  protected_namespaces = ()
@@ -8,6 +8,10 @@ COMMANDS = {
8
8
  "/group": {"/add": "", "/drop": "", "/reset": "", "set": ""},
9
9
  "/refresh": {},
10
10
  },
11
+ "/designer": {
12
+ "/svg": {},
13
+ "/sd": {},
14
+ },
11
15
  "/coding": {},
12
16
  "/chat": {"/new": {}, "/review": {}, "/no_context": {}},
13
17
  "/lib": {
@@ -22,16 +22,15 @@ def _generate_shell_script(user_input: str) -> str:
22
22
  不支持Bash
23
23
  {%- endif %}
24
24
 
25
- 根据用户的输入生成一个 shell 脚本。
25
+ 根据用户的输入以及当前的操作系统生成合适的 shell 脚本。
26
26
 
27
27
  用户输入: {{ user_input }}
28
28
 
29
- 请生成一个适当的 shell 脚本来执行用户的请求。确保脚本是安全的,并且可以在 bash shell 中运行。
30
- 脚本应该以 #!/bin/bash 开头,并包含必要的注释来解释每个步骤。
29
+ 请生成一个适当的 shell 脚本来执行用户的请求。确保脚本是安全的,并且可以在操作系统支持的 shell 中运行。
30
+ 脚本应该包含必要的注释来解释每个步骤。
31
31
  脚本内容请用如下方式返回:
32
32
 
33
- ```shell
34
- #!/bin/bash
33
+ ```shell
35
34
  # 你的 shell 脚本内容
36
35
  ```
37
36
  """
autocoder/lang.py CHANGED
@@ -62,6 +62,7 @@ lang_desc = {
62
62
  "screenshot_output": "The directory to save the screenshots",
63
63
  "code_model": "The name of the code model to use. Default is empty",
64
64
  "planner_model": "The name of the planner model to use. Default is empty",
65
+ "designer_model": "The name of the designer model to use. Default is empty",
65
66
  "query_prefix": "The query prefix",
66
67
  "query_suffix": "The query suffix",
67
68
  "next_from_yaml": "The YAML file to copy content from when creating a new action file. It supports prefix matching, e.g., specifying '001' will match '001_abc.yml'.",
@@ -134,6 +135,7 @@ lang_desc = {
134
135
  "code_model": "要使用的代码模型的名称。默认为空",
135
136
  "next_desc": "基于上一个action文件创建一个新的action文件",
136
137
  "planner_model": "要使用的规划模型的名称。默认为空",
138
+ "designer_model": "要使用的设计模型的名称。默认为空",
137
139
  "query_prefix": "查询前缀",
138
140
  "query_suffix": "查询后缀",
139
141
  "next_from_yaml": "创建新的action文件时要从中复制内容的YAML文件。支持前缀匹配,例如,指定'001'将匹配'001_abc.yml'。",
@@ -187,7 +187,11 @@ class PyProject:
187
187
 
188
188
  def convert_to_source_code(self, file_path):
189
189
  module_name = file_path
190
- source_code = self.read_file_content(file_path)
190
+ try:
191
+ source_code = self.read_file_content(file_path)
192
+ except Exception as e:
193
+ logger.warning(f"Failed to read file: {file_path}. Error: {str(e)}")
194
+ return None
191
195
  return SourceCode(module_name=module_name, source_code=source_code)
192
196
 
193
197
  def get_package_source_codes(