auto-coder 0.1.353__py3-none-any.whl → 0.1.355__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of auto-coder might be problematic. Click here for more details.

Files changed (60)
  1. {auto_coder-0.1.353.dist-info → auto_coder-0.1.355.dist-info}/METADATA +1 -1
  2. {auto_coder-0.1.353.dist-info → auto_coder-0.1.355.dist-info}/RECORD +60 -45
  3. autocoder/agent/agentic_filter.py +1 -1
  4. autocoder/auto_coder.py +8 -0
  5. autocoder/auto_coder_rag.py +37 -1
  6. autocoder/auto_coder_runner.py +58 -77
  7. autocoder/chat/conf_command.py +270 -0
  8. autocoder/chat/models_command.py +485 -0
  9. autocoder/chat_auto_coder.py +29 -24
  10. autocoder/chat_auto_coder_lang.py +26 -2
  11. autocoder/commands/auto_command.py +60 -132
  12. autocoder/commands/auto_web.py +1 -1
  13. autocoder/commands/tools.py +1 -1
  14. autocoder/common/__init__.py +3 -1
  15. autocoder/common/command_completer.py +58 -12
  16. autocoder/common/command_completer_v2.py +576 -0
  17. autocoder/common/conversations/__init__.py +52 -0
  18. autocoder/common/conversations/compatibility.py +303 -0
  19. autocoder/common/conversations/conversation_manager.py +502 -0
  20. autocoder/common/conversations/example.py +152 -0
  21. autocoder/common/file_monitor/__init__.py +5 -0
  22. autocoder/common/file_monitor/monitor.py +383 -0
  23. autocoder/common/global_cancel.py +53 -16
  24. autocoder/common/ignorefiles/__init__.py +4 -0
  25. autocoder/common/ignorefiles/ignore_file_utils.py +103 -0
  26. autocoder/common/ignorefiles/test_ignore_file_utils.py +91 -0
  27. autocoder/common/rulefiles/__init__.py +15 -0
  28. autocoder/common/rulefiles/autocoderrules_utils.py +173 -0
  29. autocoder/common/save_formatted_log.py +54 -0
  30. autocoder/common/v2/agent/agentic_edit.py +10 -39
  31. autocoder/common/v2/agent/agentic_edit_tools/list_files_tool_resolver.py +1 -1
  32. autocoder/common/v2/agent/agentic_edit_tools/search_files_tool_resolver.py +73 -43
  33. autocoder/common/v2/code_agentic_editblock_manager.py +9 -9
  34. autocoder/common/v2/code_diff_manager.py +2 -2
  35. autocoder/common/v2/code_editblock_manager.py +31 -18
  36. autocoder/common/v2/code_strict_diff_manager.py +3 -2
  37. autocoder/dispacher/actions/action.py +6 -6
  38. autocoder/dispacher/actions/plugins/action_regex_project.py +2 -2
  39. autocoder/events/event_manager_singleton.py +1 -1
  40. autocoder/index/index.py +3 -3
  41. autocoder/models.py +22 -9
  42. autocoder/rag/api_server.py +14 -2
  43. autocoder/rag/cache/local_byzer_storage_cache.py +1 -1
  44. autocoder/rag/cache/local_duckdb_storage_cache.py +8 -0
  45. autocoder/rag/cache/simple_cache.py +63 -33
  46. autocoder/rag/loaders/docx_loader.py +1 -1
  47. autocoder/rag/loaders/filter_utils.py +133 -76
  48. autocoder/rag/loaders/image_loader.py +15 -3
  49. autocoder/rag/loaders/pdf_loader.py +2 -2
  50. autocoder/rag/long_context_rag.py +11 -0
  51. autocoder/rag/qa_conversation_strategy.py +5 -31
  52. autocoder/rag/utils.py +21 -2
  53. autocoder/utils/_markitdown.py +66 -25
  54. autocoder/utils/auto_coder_utils/chat_stream_out.py +4 -4
  55. autocoder/utils/thread_utils.py +9 -27
  56. autocoder/version.py +1 -1
  57. {auto_coder-0.1.353.dist-info → auto_coder-0.1.355.dist-info}/LICENSE +0 -0
  58. {auto_coder-0.1.353.dist-info → auto_coder-0.1.355.dist-info}/WHEEL +0 -0
  59. {auto_coder-0.1.353.dist-info → auto_coder-0.1.355.dist-info}/entry_points.txt +0 -0
  60. {auto_coder-0.1.353.dist-info → auto_coder-0.1.355.dist-info}/top_level.txt +0 -0
@@ -21,7 +21,7 @@ from autocoder.common import shells
21
21
  from loguru import logger
22
22
  from autocoder.utils import llms as llms_utils
23
23
  from autocoder.rag.token_counter import count_tokens
24
- from autocoder.common.global_cancel import global_cancel
24
+ from autocoder.common.global_cancel import global_cancel,CancelRequestedException
25
25
  from autocoder.common.auto_configure import config_readme
26
26
  from autocoder.utils.auto_project_type import ProjectTypeAnalyzer
27
27
  from rich.text import Text
@@ -29,105 +29,10 @@ from autocoder.common.mcp_server import get_mcp_server, McpServerInfoRequest
29
29
  from autocoder.common.action_yml_file_manager import ActionYmlFileManager
30
30
  from autocoder.events.event_manager_singleton import get_event_manager
31
31
  from autocoder.events import event_content as EventContentCreator
32
+ from autocoder.events.event_types import Event, EventType, EventMetadata
32
33
  from autocoder.run_context import get_run_context
33
34
  from autocoder.common.stream_out_type import AutoCommandStreamOutType
34
- class CommandMessage(BaseModel):
35
- role: str
36
- content: str
37
-
38
-
39
- class ExtendedCommandMessage(BaseModel):
40
- message: CommandMessage
41
- timestamp: str
42
-
43
-
44
- class CommandConversation(BaseModel):
45
- history: Dict[str, ExtendedCommandMessage]
46
- current_conversation: List[ExtendedCommandMessage]
47
-
48
-
49
- def load_memory_file(args: AutoCoderArgs) -> CommandConversation:
50
- """Load command conversations from memory file"""
51
-
52
- memory_dir = os.path.join(".auto-coder", "memory")
53
- file_path = os.path.join(memory_dir, "command_chat_history.json")
54
- if os.path.exists(file_path):
55
- with open(file_path, "r", encoding="utf-8") as f:
56
- try:
57
- conversation = CommandConversation.model_validate_json(
58
- f.read())
59
- return conversation
60
- except Exception:
61
- return CommandConversation(history={}, current_conversation=[])
62
- return CommandConversation(history={}, current_conversation=[])
63
-
64
-
65
- class TimeBasedStrategy:
66
- def __init__(self, max_idle_time=3600*24): # 24 hour in seconds
67
- self.max_idle_time = max_idle_time
68
-
69
- def should_archive(self, last_message_time):
70
- """Check if the conversation should be archived based on last message time"""
71
- current_time = time.time()
72
- return current_time - last_message_time > self.max_idle_time
73
-
74
-
75
- def save_to_memory_file(query: str, response: str):
76
- """Save command conversation to memory file using CommandConversation structure"""
77
- memory_dir = os.path.join(".auto-coder", "memory")
78
- os.makedirs(memory_dir, exist_ok=True)
79
- file_path = os.path.join(memory_dir, "command_chat_history.json")
80
-
81
- # Initialize time-based strategy
82
- time_strategy = TimeBasedStrategy()
83
-
84
- # Create new message objects
85
- current_time = time.time()
86
- user_msg = CommandMessage(role="user", content=query)
87
- assistant_msg = CommandMessage(role="assistant", content=response)
88
-
89
- extended_user_msg = ExtendedCommandMessage(
90
- message=user_msg,
91
- timestamp=str(int(current_time))
92
- )
93
- extended_assistant_msg = ExtendedCommandMessage(
94
- message=assistant_msg,
95
- timestamp=str(int(current_time))
96
- )
97
-
98
- # Load existing conversation or create new
99
- if os.path.exists(file_path):
100
- with open(file_path, "r", encoding="utf-8") as f:
101
- try:
102
- existing_conv = CommandConversation.model_validate_json(
103
- f.read())
104
- # Check if we should archive current conversation
105
- if existing_conv.current_conversation:
106
- last_message_time = float(
107
- existing_conv.current_conversation[-1].timestamp)
108
- if time_strategy.should_archive(last_message_time):
109
- # Move current conversation to history
110
- timestamp = str(int(last_message_time))
111
- existing_conv.history[timestamp] = existing_conv.current_conversation
112
- existing_conv.current_conversation = []
113
- except Exception:
114
- existing_conv = CommandConversation(
115
- history={},
116
- current_conversation=[]
117
- )
118
- else:
119
- existing_conv = CommandConversation(
120
- history={},
121
- current_conversation=[]
122
- )
123
-
124
- existing_conv.current_conversation.append(extended_user_msg)
125
- existing_conv.current_conversation.append(extended_assistant_msg)
126
- # Save updated conversation
127
- with open(file_path, "w", encoding="utf-8") as f:
128
- f.write(existing_conv.model_dump_json(indent=2))
129
-
130
-
35
+ from autocoder.common.rulefiles.autocoderrules_utils import get_rules
131
36
  class CommandSuggestion(BaseModel):
132
37
  command: str
133
38
  parameters: Dict[str, Any]
@@ -203,17 +108,14 @@ class CommandAutoTuner:
203
108
  self.mcp_server_info = mcp_server_info_response.result
204
109
  except Exception as e:
205
110
  logger.error(f"Error getting MCP server info: {str(e)}")
206
- self.mcp_server_info = ""
207
-
208
- def get_conversations(self) -> List[CommandMessage]:
209
- """Get conversation history from memory file"""
210
- conversation = load_memory_file(args=self.args)
211
- return [command_message.message for command_message in conversation.current_conversation]
111
+ self.mcp_server_info = ""
212
112
 
213
113
  @byzerllm.prompt()
214
114
  def _analyze(self, request: AutoCommandRequest) -> str:
215
115
  """
216
116
  你是 auto-coder.chat 软件,帮助用户完成编程方面的需求。我们的目标是根据用户输入和当前上下文,组合多个函数来完成用户的需求。
117
+
118
+ ====
217
119
 
218
120
  ## 当前用户环境信息如下:
219
121
  <os_info>
@@ -254,6 +156,8 @@ class CommandAutoTuner:
254
156
  {{ current_conf }}
255
157
  </current_conf>
256
158
 
159
+ ====
160
+
257
161
  ## 可用函数列表:
258
162
  {{ available_commands }}
259
163
 
@@ -263,6 +167,8 @@ class CommandAutoTuner:
263
167
  ## 函数组合说明:
264
168
  {{ command_combination_readme }}
265
169
 
170
+ ====
171
+
266
172
  ## active-context 项目追踪文档系统
267
173
 
268
174
  在 {{ current_project }}/.auto-coder/active-context 下,我们提供了对该项目每个文件目录的追踪。
@@ -279,9 +185,10 @@ class CommandAutoTuner:
279
185
  - query: 用户需求
280
186
  - urls: 用户提供的上下文文件列表
281
187
  - dynamic_urls: auto-coder.chat 自动感知的一些文件列表
282
- - add_updated_urls: 这次需求发生变更的文件列表
283
-
188
+ - add_updated_urls: 这次需求发生变更的文件列表
189
+
284
190
  {% if conversation_history %}
191
+ ====
285
192
  ## 历史对话:
286
193
  <conversation_history>
287
194
  {% for conv in conversation_history %}
@@ -289,6 +196,18 @@ class CommandAutoTuner:
289
196
  {% endfor %}
290
197
  </conversation_history>
291
198
  {% endif %}
199
+
200
+ {% if rules %}
201
+ ====
202
+
203
+ 用户提供的规则文件,你必须严格遵守。
204
+ {% for key, value in rules.items() %}
205
+ <user_rule>
206
+ ##File: {{ key }}
207
+ {{ value }}
208
+ </user_rule>
209
+ {% endfor %}
210
+ {% endif %}
292
211
 
293
212
  ## 用户需求:
294
213
  <user_input>
@@ -326,7 +245,8 @@ class CommandAutoTuner:
326
245
  return {
327
246
  "user_input": request.user_input,
328
247
  "current_files": self.memory_config.memory["current_files"]["files"],
329
- "conversation_history": self.get_conversations(),
248
+ "conversation_history": [],
249
+ "rules": get_rules(),
330
250
  "available_commands": self._command_readme.prompt(),
331
251
  "current_conf": json.dumps(self.memory_config.memory["conf"], indent=2),
332
252
  "env_info": env_info,
@@ -461,7 +381,8 @@ class CommandAutoTuner:
461
381
  display_func=extract_command_response,
462
382
  args=self.args,
463
383
  extra_meta={
464
- "stream_out_type": AutoCommandStreamOutType.COMMAND_SUGGESTION.value
384
+ "stream_out_type": AutoCommandStreamOutType.COMMAND_SUGGESTION.value,
385
+ "path": "/agentic/agent/command_suggestion"
465
386
  }
466
387
  )
467
388
 
@@ -515,16 +436,11 @@ class CommandAutoTuner:
515
436
  conversations.append({"role": "assistant", "content": result})
516
437
  # 提取 JSON 并转换为 AutoCommandResponse
517
438
  response = to_model(result, AutoCommandResponse)
518
-
519
- # 保存对话记录
520
- save_to_memory_file(
521
- query=request.user_input,
522
- response=response.model_dump_json(indent=2)
523
- )
439
+
524
440
  result_manager = ResultManager()
525
441
 
526
442
  while True:
527
- global_cancel.check_and_raise()
443
+ global_cancel.check_and_raise(token=self.args.event_file)
528
444
  # 执行命令
529
445
  command = response.suggestions[0].command
530
446
  parameters = response.suggestions[0].parameters
@@ -539,7 +455,11 @@ class CommandAutoTuner:
539
455
  EventContentCreator.ResultCommandPrepareStatContent(
540
456
  command=command,
541
457
  parameters=parameters
542
- ).to_dict()))
458
+ ).to_dict()),metadata=EventMetadata(
459
+ stream_out_type="command_prepare",
460
+ path="/agentic/agent/command_prepare",
461
+ action_file=self.args.file
462
+ ).to_dict())
543
463
 
544
464
  self.execute_auto_command(command, parameters)
545
465
  content = ""
@@ -566,7 +486,13 @@ class CommandAutoTuner:
566
486
  "auto_command_action_break", command=command, action=action)
567
487
  printer.print_str_in_terminal(temp_content,style="yellow")
568
488
  get_event_manager(self.args.event_file).write_result(
569
- EventContentCreator.create_result(content=temp_content))
489
+ EventContentCreator.create_result(content=temp_content),
490
+ metadata=EventMetadata(
491
+ stream_out_type="command_break",
492
+ path="/agentic/agent/command_break",
493
+ action_file=self.args.file
494
+ ).to_dict()
495
+ )
570
496
  break
571
497
 
572
498
  if command == "response_user":
@@ -576,7 +502,12 @@ class CommandAutoTuner:
576
502
  EventContentCreator.create_result(content=EventContentCreator.ResultCommandExecuteStatContent(
577
503
  command=command,
578
504
  content=content
579
- ).to_dict()))
505
+ ).to_dict()),metadata=EventMetadata(
506
+ stream_out_type="command_execute",
507
+ path="/agentic/agent/command_execute",
508
+ action_file=self.args.file
509
+ ).to_dict()
510
+ )
580
511
 
581
512
  # 打印执行结果
582
513
  console = Console()
@@ -678,17 +609,18 @@ class CommandAutoTuner:
678
609
  # 提取 JSON 并转换为 AutoCommandResponse
679
610
  response = to_model(result, AutoCommandResponse)
680
611
  if not response or not response.suggestions:
681
- break
682
-
683
- save_to_memory_file(
684
- query=request.user_input,
685
- response=response.model_dump_json(indent=2)
686
- )
612
+ break
687
613
  else:
688
614
  temp_content = printer.get_message_from_key_with_format("auto_command_break", command=command)
689
615
  printer.print_str_in_terminal(temp_content,style="yellow")
690
616
  get_event_manager(self.args.event_file).write_result(
691
- EventContentCreator.create_result(content=temp_content))
617
+ EventContentCreator.create_result(content=temp_content),
618
+ metadata=EventMetadata(
619
+ stream_out_type="command_break",
620
+ path="/agentic/agent/command_break",
621
+ action_file=self.args.file
622
+ ).to_dict()
623
+ )
692
624
  break
693
625
 
694
626
  return response
@@ -1517,18 +1449,14 @@ class CommandAutoTuner:
1517
1449
  command_map[command](**parameters)
1518
1450
  else:
1519
1451
  command_map[command]()
1452
+ except CancelRequestedException as e:
1453
+ raise e
1520
1454
 
1521
1455
  except Exception as e:
1522
1456
  error_msg = str(e)
1523
1457
  self.printer.print_in_terminal(
1524
1458
  "auto_command_failed", style="red", command=command, error=error_msg)
1525
-
1526
- # Save failed command execution
1527
- save_to_memory_file(
1528
- query=f"Command: {command} Parameters: {json.dumps(parameters) if parameters else 'None'}",
1529
- response=f"Command execution failed: {error_msg}"
1530
- )
1531
-
1459
+
1532
1460
  self.result_manager = ResultManager()
1533
1461
  result = f"command {command} with parameters {parameters} execution failed with error {error_msg}"
1534
1462
  self.result_manager.add_result(content=result, meta={
@@ -794,7 +794,7 @@ class AutoWebTuner:
794
794
  logger.info(f"开始执行迭代 {iterations}/{max_iterations}")
795
795
 
796
796
  # 检查是否需要取消操作
797
- global_cancel.check_and_raise()
797
+ global_cancel.check_and_raise(token=self.args.event_file)
798
798
 
799
799
  # 如果没有更多操作,认为任务完成
800
800
  if not plan.actions:
@@ -48,7 +48,7 @@ from autocoder.common.mcp_server_types import (
48
48
  McpListRunningRequest, McpRefreshRequest
49
49
  )
50
50
 
51
- from autocoder.ignorefiles.ignore_file_utils import should_ignore
51
+ from autocoder.common.ignorefiles.ignore_file_utils import should_ignore
52
52
 
53
53
 
54
54
  @byzerllm.prompt()
@@ -304,6 +304,8 @@ class AutoCoderArgs(pydantic.BaseModel):
304
304
  rag_duckdb_query_similarity: Optional[float] = 0.1 # DuckDB 向量化检索 相似度 阈值
305
305
  rag_duckdb_query_top_k: Optional[int] = 10000 # DuckDB 向量化检索 返回 TopK个结果(且大于相似度)
306
306
  rag_index_build_workers: Optional[int] = 10
307
+ rag_emb_dim: Optional[int] = 1024
308
+ rag_emb_text_size: Optional[int] = 1024
307
309
  # rag 本地图床地址
308
310
  local_image_host: Optional[str] = ""
309
311
  rag_recall_max_queries: Optional[int] = 5
@@ -368,7 +370,7 @@ class AutoCoderArgs(pydantic.BaseModel):
368
370
  segment_ratio: Optional[float] = 0.2
369
371
  buff_ratio: Optional[float] = 0.1
370
372
 
371
- disable_inference_enhance: Optional[bool] = False
373
+ disable_inference_enhance: Optional[bool] = True
372
374
  inference_deep_thought: Optional[bool] = False
373
375
  inference_slow_without_deep_thought: Optional[bool] = False
374
376
  inference_compute_precision: int = 64
@@ -1,10 +1,12 @@
1
+ import os
2
+ import pydantic
3
+ from typing import Callable,Dict,Any # Added import
4
+ from pydantic import BaseModel,SkipValidation # Added import
1
5
  from prompt_toolkit.completion import Completer, Completion, CompleteEvent
2
6
  from prompt_toolkit.document import Document
3
- import pydantic
4
- from typing import Callable,Dict,Any
5
- from pydantic import BaseModel,SkipValidation
6
- from autocoder.common import AutoCoderArgs
7
- import os
7
+
8
+ from autocoder import models as models_module # Import models module
9
+ from autocoder.common import AutoCoderArgs # Ensure AutoCoderArgs is imported
8
10
 
9
11
  COMMANDS = {
10
12
  "/add_files": {
@@ -78,7 +80,7 @@ class MemoryConfig(BaseModel):
78
80
  """
79
81
  A model to encapsulate memory configuration and operations.
80
82
  """
81
- memory: Dict[str, Any]
83
+ get_memory_func: SkipValidation[Callable]
82
84
  save_memory_func: SkipValidation[Callable]
83
85
 
84
86
  class Config:
@@ -366,10 +368,13 @@ class CommandCompleter(Completer):
366
368
  self.file_system_model = file_system_model
367
369
  self.memory_model = memory_model
368
370
  self.all_file_names = file_system_model.get_all_file_names_in_project()
369
- self.all_files = file_system_model.get_all_file_in_project()
370
- self.all_dir_names = file_system_model.get_all_dir_names_in_project()
371
- self.all_files_with_dot = file_system_model.get_all_file_in_project_with_dot()
372
- self.symbol_list = file_system_model.get_symbol_list()
371
+ self.all_files = self.file_system_model.get_all_file_in_project()
372
+ self.all_dir_names = self.file_system_model.get_all_dir_names_in_project()
373
+ self.all_files_with_dot = self.file_system_model.get_all_file_in_project_with_dot()
374
+ self.symbol_list = self.file_system_model.get_symbol_list()
375
+ # Refresh model names if they can change dynamically (optional)
376
+ self.all_model_names = [m['name'] for m in models_module.load_models()]
377
+ self.all_model_names = [m['name'] for m in models_module.load_models()] # Load model names
373
378
  self.current_file_names = []
374
379
 
375
380
  def get_completions(self, document, complete_event):
@@ -544,9 +549,49 @@ class CommandCompleter(Completer):
544
549
  for field_name in AutoCoderArgs.model_fields.keys()
545
550
  if field_name.startswith(current_word)
546
551
  ]
547
-
552
+ # /conf key:[cursor] or /conf key:v[cursor]
553
+ elif ":" in text: # Check if colon exists anywhere after /conf
554
+ parts = text[len("/conf"):].strip().split(":", 1)
555
+ if len(parts) == 2:
556
+ key = parts[0].strip()
557
+ value_part = parts[1] # This is the part after the colon
558
+
559
+ # Determine the word being completed (part after colon)
560
+ # Find the start of the current word after the colon
561
+ space_pos = value_part.rfind(" ")
562
+ if space_pos != -1:
563
+ current_value_word = value_part[space_pos + 1:]
564
+ else:
565
+ current_value_word = value_part
566
+
567
+ # Model name completion
568
+ if "model" in key:
569
+ for model_name in self.all_model_names:
570
+ if model_name.startswith(current_value_word):
571
+ yield Completion(model_name, start_position=-len(current_value_word))
572
+ # Prioritize model completion if key matches
573
+ return # Exit after providing model completions
574
+
575
+ # Boolean completion
576
+ bool_keys = {name for name, field in AutoCoderArgs.model_fields.items() if field.annotation == bool}
577
+ if key in bool_keys:
578
+ if "true".startswith(current_value_word):
579
+ yield Completion("true", start_position=-len(current_value_word))
580
+ if "false".startswith(current_value_word):
581
+ yield Completion("false", start_position=-len(current_value_word))
582
+ # Prioritize boolean completion if key matches
583
+ return # Exit after providing boolean completions
584
+
585
+ # Add other value completions based on key if needed here
586
+
587
+ # No specific value completions matched, so no yield from here
588
+ return # Exit if we were trying value completion
589
+
590
+ # Default completion for keys or /drop if no value completion logic was triggered above
548
591
  for completion in completions:
549
- yield Completion(completion, start_position=-len(current_word))
592
+ # Adjust start_position based on whether we are completing key or /drop
593
+ yield Completion(completion, start_position=-len(current_word))
594
+
550
595
  elif words[0] in ["/chat", "/coding","/auto"]:
551
596
  image_extensions = (
552
597
  ".png",
@@ -895,6 +940,7 @@ class CommandCompleter(Completer):
895
940
  def update_current_files(self, files):
896
941
  self.current_file_names = [f for f in files]
897
942
 
943
+
898
944
  def refresh_files(self):
899
945
  self.all_file_names = self.file_system_model.get_all_file_names_in_project()
900
946
  self.all_files = self.file_system_model.get_all_file_in_project()