autocoder-nano 0.1.30__py3-none-any.whl → 0.1.34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. autocoder_nano/agent/agent_base.py +4 -4
  2. autocoder_nano/agent/agentic_edit.py +1584 -0
  3. autocoder_nano/agent/agentic_edit_tools/__init__.py +28 -0
  4. autocoder_nano/agent/agentic_edit_tools/ask_followup_question_tool.py +51 -0
  5. autocoder_nano/agent/agentic_edit_tools/attempt_completion_tool.py +36 -0
  6. autocoder_nano/agent/agentic_edit_tools/base_tool_resolver.py +31 -0
  7. autocoder_nano/agent/agentic_edit_tools/execute_command_tool.py +65 -0
  8. autocoder_nano/agent/agentic_edit_tools/list_code_definition_names_tool.py +78 -0
  9. autocoder_nano/agent/agentic_edit_tools/list_files_tool.py +123 -0
  10. autocoder_nano/agent/agentic_edit_tools/list_package_info_tool.py +42 -0
  11. autocoder_nano/agent/agentic_edit_tools/plan_mode_respond_tool.py +35 -0
  12. autocoder_nano/agent/agentic_edit_tools/read_file_tool.py +73 -0
  13. autocoder_nano/agent/agentic_edit_tools/replace_in_file_tool.py +148 -0
  14. autocoder_nano/agent/agentic_edit_tools/search_files_tool.py +135 -0
  15. autocoder_nano/agent/agentic_edit_tools/write_to_file_tool.py +57 -0
  16. autocoder_nano/agent/agentic_edit_types.py +151 -0
  17. autocoder_nano/auto_coder_nano.py +159 -700
  18. autocoder_nano/git_utils.py +63 -1
  19. autocoder_nano/llm_client.py +170 -3
  20. autocoder_nano/llm_types.py +72 -16
  21. autocoder_nano/rules/rules_learn.py +221 -0
  22. autocoder_nano/templates.py +1 -1
  23. autocoder_nano/utils/completer_utils.py +616 -0
  24. autocoder_nano/utils/formatted_log_utils.py +128 -0
  25. autocoder_nano/utils/printer_utils.py +5 -4
  26. autocoder_nano/utils/shell_utils.py +85 -0
  27. autocoder_nano/version.py +1 -1
  28. {autocoder_nano-0.1.30.dist-info → autocoder_nano-0.1.34.dist-info}/METADATA +3 -2
  29. {autocoder_nano-0.1.30.dist-info → autocoder_nano-0.1.34.dist-info}/RECORD +34 -16
  30. autocoder_nano/agent/new/auto_new_project.py +0 -278
  31. /autocoder_nano/{agent/new → rules}/__init__.py +0 -0
  32. {autocoder_nano-0.1.30.dist-info → autocoder_nano-0.1.34.dist-info}/LICENSE +0 -0
  33. {autocoder_nano-0.1.30.dist-info → autocoder_nano-0.1.34.dist-info}/WHEEL +0 -0
  34. {autocoder_nano-0.1.30.dist-info → autocoder_nano-0.1.34.dist-info}/entry_points.txt +0 -0
  35. {autocoder_nano-0.1.30.dist-info → autocoder_nano-0.1.34.dist-info}/top_level.txt +0 -0
autocoder_nano/git_utils.py
@@ -1,4 +1,5 @@
 import os
+from typing import Tuple, List, Dict, Optional
 
 from autocoder_nano.llm_prompt import prompt
 from git import Repo, GitCommandError
@@ -540,4 +541,65 @@ def generate_commit_message(changes_report: str) -> str:
     {{ changes_report }}
 
     Please output the commit message and nothing else.
-    '''
+    '''
+
+
+def get_commit_changes(
+        repo_path: str, commit_id: str
+) -> Tuple[List[Tuple[str, List[str], Dict[str, Tuple[str, str]]]], Optional[str]]:
+    """ Fetch the changes of the given commit directly from the Git repository. """
+    querie_with_urls_and_changes = []
+    try:
+        repo = get_repo(repo_path)
+        commit = repo.commit(commit_id)
+        modified_files = []
+        changes = {}
+
+        # Check whether this is the initial commit (it has no parents)
+        if not commit.parents:
+            # Initial commit: collect every file
+            for item in commit.tree.traverse():
+                if item.type == 'blob':  # Only handle files, not directories
+                    file_path = item.path
+                    modified_files.append(file_path)
+                    # There is no content before the initial commit
+                    before_content = None
+                    # Fetch the content after the commit
+                    after_content = repo.git.show(f"{commit.hexsha}:{file_path}")
+                    changes[file_path] = (before_content, after_content)
+        else:
+            # Get the parent commit
+            parent = commit.parents[0]
+            # Get the list of changed files
+            for diff_item in parent.diff(commit):
+                file_path = diff_item.a_path if diff_item.a_path else diff_item.b_path
+                modified_files.append(file_path)
+
+                # Fetch the content before the change
+                before_content = None
+                try:
+                    if diff_item.a_blob:
+                        before_content = repo.git.show(f"{parent.hexsha}:{file_path}")
+                except GitCommandError:
+                    pass  # The file may be newly added
+
+                # Fetch the content after the change
+                after_content = None
+                try:
+                    if diff_item.b_blob:
+                        after_content = repo.git.show(f"{commit.hexsha}:{file_path}")
+                except GitCommandError:
+                    pass  # The file may have been deleted
+
+                changes[file_path] = (before_content, after_content)
+
+        # Use the commit message as the query content
+        query = commit.message
+        querie_with_urls_and_changes.append((query, modified_files, changes))
+
+    except GitCommandError as e:
+        printer.print_text(f"git_command_error: {e}.", style="red")
+    except Exception as e:
+        printer.print_text(f"get_commit_changes_error: {e}.", style="red")
+
+    return querie_with_urls_and_changes, None
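
A minimal sketch of driving the new `get_commit_changes` helper; the repository path and commit hash are placeholders, not values from this release:

```python
# Hypothetical usage; "/path/to/repo" and "abc1234" are placeholders.
from autocoder_nano.git_utils import get_commit_changes

changes, _ = get_commit_changes("/path/to/repo", "abc1234")
for query, urls, file_changes in changes:
    print(f"commit message: {query!r}")
    for path in urls:
        before, after = file_changes[path]
        # before is None for newly added files, after is None for deleted ones
        state = "added" if before is None else "deleted" if after is None else "modified"
        print(f"  {path}: {state}")
```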
autocoder_nano/llm_client.py
@@ -1,10 +1,10 @@
-from typing import List
+from typing import List, Generator, Any, Optional, Dict, Union
 
 # from loguru import logger
 from openai import OpenAI, Stream
 from openai.types.chat import ChatCompletionChunk, ChatCompletion
 
-from autocoder_nano.llm_types import LLMRequest, LLMResponse
+from autocoder_nano.llm_types import LLMRequest, LLMResponse, AutoCoderArgs, SingleOutputMeta
 
 from autocoder_nano.utils.printer_utils import Printer
 
 
@@ -53,6 +53,126 @@ class AutoLLM:
         res = self._query(model, request, stream=True)
         return res
 
+    def stream_chat_ai_ex(
+            self, conversations, model: Optional[str] = None, role_mapping=None, delta_mode: bool = False,
+            is_reasoning: bool = False, llm_config: dict | None = None
+    ):
+        if llm_config is None:
+            llm_config = {}
+        if not model:
+            model = self.default_model_name
+
+        client: OpenAI = self.sub_clients[model]["client"]
+        model_name = self.sub_clients[model]["model_name"]
+
+        request = LLMRequest(
+            model=model_name,
+            messages=conversations,
+            stream=True
+        )
+
+        if is_reasoning:
+            response = client.chat.completions.create(
+                messages=request.messages,
+                model=request.model,
+                stream=request.stream,
+                stream_options={"include_usage": True},
+                extra_headers={
+                    "HTTP-Referer": "https://auto-coder.chat",
+                    "X-Title": "auto-coder-nano"
+                },
+                **llm_config
+            )
+        else:
+            response = client.chat.completions.create(
+                messages=conversations,
+                model=model_name,
+                temperature=llm_config.get("temperature", request.temperature),
+                max_tokens=llm_config.get("max_tokens", request.max_tokens),
+                top_p=llm_config.get("top_p", request.top_p),
+                stream=request.stream,
+                stream_options={"include_usage": True},
+                **llm_config
+            )
+
+        last_meta = None
+
+        if delta_mode:
+            for chunk in response:
+                if hasattr(chunk, "usage") and chunk.usage:
+                    input_tokens_count = chunk.usage.prompt_tokens
+                    generated_tokens_count = chunk.usage.completion_tokens
+                else:
+                    input_tokens_count = 0
+                    generated_tokens_count = 0
+
+                if not chunk.choices:
+                    if last_meta:
+                        yield (
+                            "",
+                            SingleOutputMeta(
+                                input_tokens_count=input_tokens_count,
+                                generated_tokens_count=generated_tokens_count,
+                                reasoning_content="",
+                                finish_reason=last_meta.finish_reason,
+                            ),
+                        )
+                    continue
+
+                content = chunk.choices[0].delta.content or ""
+
+                reasoning_text = ""
+                if hasattr(chunk.choices[0].delta, "reasoning_content"):
+                    reasoning_text = chunk.choices[0].delta.reasoning_content or ""
+
+                last_meta = SingleOutputMeta(
+                    input_tokens_count=input_tokens_count,
+                    generated_tokens_count=generated_tokens_count,
+                    reasoning_content=reasoning_text,
+                    finish_reason=chunk.choices[0].finish_reason,
+                )
+                yield content, last_meta
+        else:
+            s = ""
+            all_reasoning_text = ""
+            for chunk in response:
+                if hasattr(chunk, "usage") and chunk.usage:
+                    input_tokens_count = chunk.usage.prompt_tokens
+                    generated_tokens_count = chunk.usage.completion_tokens
+                else:
+                    input_tokens_count = 0
+                    generated_tokens_count = 0
+
+                if not chunk.choices:
+                    if last_meta:
+                        yield (
+                            s,
+                            SingleOutputMeta(
+                                input_tokens_count=input_tokens_count,
+                                generated_tokens_count=generated_tokens_count,
+                                reasoning_content=all_reasoning_text,
+                                finish_reason=last_meta.finish_reason,
+                            ),
+                        )
+                    continue
+
+                content = chunk.choices[0].delta.content or ""
+                reasoning_text = ""
+                if hasattr(chunk.choices[0].delta, "reasoning_content"):
+                    reasoning_text = chunk.choices[0].delta.reasoning_content or ""
+
+                s += content
+                all_reasoning_text += reasoning_text
+                yield (
+                    s,
+                    SingleOutputMeta(
+                        input_tokens_count=input_tokens_count,
+                        generated_tokens_count=generated_tokens_count,
+                        reasoning_content=all_reasoning_text,
+                        finish_reason=chunk.choices[0].finish_reason,
+                    ),
+                )
+
     def chat_ai(self, conversations, model=None) -> LLMResponse:
         # conversations = [{"role": "user", "content": prompt_str}] deepseek-chat
         if not model and not self.default_model_name:
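
For context, a minimal sketch of consuming `stream_chat_ai_ex`, assuming an `AutoLLM` instance whose sub-client for a "chat" alias was registered elsewhere; the prompt text and alias are placeholders:

```python
# Hypothetical caller; `llm` is a configured AutoLLM and "chat" is an
# assumed model alias present in llm.sub_clients.
conversations = [{"role": "user", "content": "Summarize this project."}]

meta = None
for text, meta in llm.stream_chat_ai_ex(conversations, model="chat", delta_mode=True):
    # delta_mode=True yields per-chunk deltas; with delta_mode=False each
    # yield instead carries the full accumulated text so far.
    print(text, end="", flush=True)
if meta is not None:
    print(f"\nfinish_reason={meta.finish_reason}, "
          f"tokens={meta.input_tokens_count}+{meta.generated_tokens_count}")
```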
@@ -129,4 +249,51 @@ class AutoLLM:
                 "model": res.model,
                 "created": res.created
             }
-        )
+        )
+
+
+def stream_chat_with_continue(
+        llm: AutoLLM, conversations: List[dict], llm_config: dict, args: AutoCoderArgs
+) -> Generator[Any, None, None]:
+    """ Stream the output and keep generating until the response is complete. """
+    count = 0
+    temp_conversations = [] + conversations
+    current_metadata = None
+    metadatas = {}
+    while True:
+        # Use the streaming API to fetch generated content
+        stream_generator = llm.stream_chat_ai_ex(
+            conversations=temp_conversations,
+            model=args.chat_model,
+            delta_mode=True,
+            llm_config={**llm_config}
+        )
+
+        current_content = ""
+
+        for res in stream_generator:
+            content = res[0]
+            current_content += content
+            if current_metadata is None:
+                current_metadata = res[1]
+                metadatas[count] = res[1]
+            else:
+                metadatas[count] = res[1]
+                current_metadata.finish_reason = res[1].finish_reason
+                current_metadata.reasoning_content = res[1].reasoning_content
+
+            # Yield the current StreamChatWithContinueResult
+            current_metadata.generated_tokens_count = sum([v.generated_tokens_count for _, v in metadatas.items()])
+            current_metadata.input_tokens_count = sum([v.input_tokens_count for _, v in metadatas.items()])
+            yield content, current_metadata
+
+        # Update the conversation history
+        temp_conversations.append({"role": "assistant", "content": current_content})
+
+        # Check whether generation needs to continue
+        if current_metadata.finish_reason != "length" or count >= args.generate_max_rounds:
+            if count >= args.generate_max_rounds:
+                printer.print_text(f"LLM generation reached the maximum number of rounds; current round: {count}, "
+                                   f"max rounds: {args.generate_max_rounds}, "
+                                   f"Tokens: {current_metadata.generated_tokens_count}", style="yellow")
+            break
+        count += 1
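
The auto-continue behavior: when a round stops with `finish_reason == "length"`, the accumulated text is appended as an assistant turn and generation resumes, up to `args.generate_max_rounds`. A sketch of a driver, assuming `llm` and `args` are configured elsewhere:

```python
# Hypothetical driver; llm (AutoLLM) and args (AutoCoderArgs with
# chat_model and generate_max_rounds set) come from the surrounding app.
parts = []
meta = None
for delta, meta in stream_chat_with_continue(
    llm, [{"role": "user", "content": "Write a long design doc."}],
    llm_config={}, args=args,
):
    parts.append(delta)  # deltas, since the helper uses delta_mode=True
print("".join(parts))
if meta is not None:
    # Token counts are summed across all continuation rounds.
    print(meta.input_tokens_count, meta.generated_tokens_count)
```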
autocoder_nano/llm_types.py
@@ -1,8 +1,8 @@
 import dataclasses
 from enum import Enum
-from typing import List, Dict, Any, Optional, Union, Tuple, Set
+from typing import List, Dict, Any, Optional, Union, Tuple, Set, Callable
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, SkipValidation
 
 
 class AutoCoderArgs(BaseModel):
@@ -18,6 +18,7 @@ class AutoCoderArgs(BaseModel):
     index_filter_level: Optional[int] = 0  # Filter level used to find relevant files
     index_filter_file_num: Optional[int] = -1  #
     index_filter_workers: Optional[int] = 1  # Number of threads for filtering files
+    index_model_max_input_length: Optional[int] = 6000  # Maximum input length of the model
     filter_batch_size: Optional[int] = 5  #
     anti_quota_limit: Optional[int] = 1  # Interval (s) between requests to the model
     skip_build_index: Optional[bool] = False  # Whether to skip building the index (the index helps you find relevant files via queries)
@@ -46,7 +47,7 @@ class AutoCoderArgs(BaseModel):
     full_text_ratio: Optional[float] = 0.7
     segment_ratio: Optional[float] = 0.2
     buff_ratio: Optional[float] = 0.1
-    required_exts: Optional[str] = None  # File extensions to process, e.g. .pdf,.doc
+    required_exts: Optional[str] = None   # File extensions to process, e.g. .pdf,.doc
     monitor_mode: bool = False  # Monitor mode; watches the doc_dir directory for file changes
     enable_hybrid_index: bool = False  # Enable the hybrid index
     disable_auto_window: bool = False
@@ -57,26 +58,32 @@ class AutoCoderArgs(BaseModel):
     enable_rag_context: Optional[Union[bool, str]] = False
     disable_segment_reorder: bool = False
     disable_inference_enhance: bool = False
-    duckdb_vector_dim: Optional[int] = 1024  # Dimension of the DuckDB vector store
+    duckdb_vector_dim: Optional[int] = 1024   # Dimension of the DuckDB vector store
     duckdb_query_similarity: Optional[float] = 0.7  # Similarity threshold for DuckDB vector retrieval
-    duckdb_query_top_k: Optional[int] = 50  # DuckDB vector retrieval returns the top K results (above the similarity threshold)
+    duckdb_query_top_k: Optional[int] = 50    # DuckDB vector retrieval returns the top K results (above the similarity threshold)
 
     # Git-related parameters
     skip_commit: Optional[bool] = False
 
+    # Rules-related parameters
+    enable_rules: Optional[bool] = False
+
+    # Agent-related parameters
+    generate_max_rounds: Optional[int] = 5
+
     # Model-related parameters
     current_chat_model: Optional[str] = ""
     current_code_model: Optional[str] = ""
-    model: Optional[str] = ""  # Default model
-    chat_model: Optional[str] = ""  # AI chat model
-    index_model: Optional[str] = ""  # Code-index generation model
-    code_model: Optional[str] = ""  # Coding model
-    commit_model: Optional[str] = ""  # Git commit model
-    emb_model: Optional[str] = ""  # RAG embedding model
-    recall_model: Optional[str] = ""  # RAG recall-stage model
-    chunk_model: Optional[str] = ""  # Paragraph re-ranking model
-    qa_model: Optional[str] = ""  # RAG question-answering model
-    vl_model: Optional[str] = ""  # Multimodal model
+    model: Optional[str] = ""         # Default model
+    chat_model: Optional[str] = ""    # AI chat model
+    index_model: Optional[str] = ""   # Code-index generation model
+    code_model: Optional[str] = ""    # Coding model
+    commit_model: Optional[str] = ""  # Git commit model
+    emb_model: Optional[str] = ""     # RAG embedding model
+    recall_model: Optional[str] = ""  # RAG recall-stage model
+    chunk_model: Optional[str] = ""   # Paragraph re-ranking model
+    qa_model: Optional[str] = ""      # RAG question-answering model
+    vl_model: Optional[str] = ""      # Multimodal model
 
     class Config:
         protected_namespaces = ()
@@ -117,6 +124,14 @@ class SourceCode(BaseModel):
     metadata: Dict[str, Any] = Field(default_factory=dict)
 
 
+class SourceCodeList:
+    def __init__(self, sources: List[SourceCode]):
+        self.sources = sources
+
+    def to_str(self):
+        return "\n".join([f"##File: {source.module_name}\n{source.source_code}\n" for source in self.sources])
+
+
 class LLMRequest(BaseModel):
     model: str  # Name of the language model to use
     messages: List[Dict[str, str]]  # List of conversation messages; each message is a dict with "role" and "content"
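
A tiny illustration of `SourceCodeList.to_str()`, which produces the `##File:` format the rules prompts below iterate over; the module name and body are invented:

```python
# Illustrative only; the file name and source text are made up.
sources = SourceCodeList([
    SourceCode(module_name="src/app.py", source_code="print('hi')"),
])
print(sources.to_str())
# ##File: src/app.py
# print('hi')
```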
@@ -138,6 +153,21 @@ class LLMResponse(BaseModel):
     )
 
 
+class SingleOutputMeta:
+    def __init__(self, input_tokens_count: int = 0,
+                 generated_tokens_count: int = 0,
+                 reasoning_content: str = "",
+                 finish_reason: str = "",
+                 first_token_time: float = 0.0,
+                 extra_info: Dict[str, Any] = {}):
+        self.input_tokens_count = input_tokens_count
+        self.generated_tokens_count = generated_tokens_count
+        self.reasoning_content = reasoning_content
+        self.finish_reason = finish_reason
+        self.first_token_time = first_token_time
+        self.extra_info = extra_info
+
+
 class IndexItem(BaseModel):
     module_name: str
     symbols: str
@@ -211,6 +241,23 @@ class Tag(BaseModel):
     end_tag: str
 
 
+class FileSystemModel(BaseModel):
+    project_root: str
+    get_all_file_names_in_project: SkipValidation[Callable]
+    get_all_file_in_project: SkipValidation[Callable]
+    get_all_dir_names_in_project: SkipValidation[Callable]
+    get_all_file_in_project_with_dot: SkipValidation[Callable]
+    get_symbol_list: SkipValidation[Callable]
+
+
+class MemoryConfig(BaseModel):
+    get_memory_func: SkipValidation[Callable]
+    save_memory_func: SkipValidation[Callable]
+
+    class Config:
+        arbitrary_types_allowed = True
+
+
 class SymbolItem(BaseModel):
     symbol_name: str
     symbol_type: SymbolType
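
`SkipValidation[Callable]` is what lets these models carry plain Python callables without pydantic trying to validate or coerce them. A minimal sketch with stand-in functions (not the project's real memory helpers, whose signatures are not shown in this diff):

```python
# Stand-in callables; the real ones are wired up by the application.
_memory: dict = {}

mem_conf = MemoryConfig(
    get_memory_func=lambda: _memory,   # returns the in-memory state
    save_memory_func=lambda: None,     # would persist it; a no-op here
)
print(mem_conf.get_memory_func())  # -> {}
```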
@@ -269,4 +316,13 @@ class FileInfo(BaseModel):
     file_path: str
     relative_path: str
     modify_time: float
-    file_md5: str
+    file_md5: str
+
+
+class RuleFile(BaseModel):
+    """Pydantic model for a rules file."""
+    description: str = Field(default="", description="Description of the rule")
+    globs: List[str] = Field(default_factory=list, description="List of file-matching patterns")
+    always_apply: bool = Field(default=False, description="Whether to always apply the rule")
+    content: str = Field(default="", description="Body content of the rules file")
+    file_path: str = Field(default="", description="Path of the rules file")
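
A hypothetical `RuleFile` instance, mirroring the frontmatter format that `rules_learn.py` (below) asks the model to emit; every value is invented:

```python
rule = RuleFile(
    description="Stream LLM output with usage metadata",
    globs=["src/services/*.py"],
    always_apply=False,
    content="# Rule body in markdown ...",
    file_path=".autocoderrules/streaming.md",
)
print(rule.model_dump())  # pydantic v2 serialization
```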
autocoder_nano/rules/rules_learn.py (new file)
@@ -0,0 +1,221 @@
+import os
+from typing import List, Tuple, Dict, Optional, Generator
+
+from autocoder_nano.git_utils import get_commit_changes
+from autocoder_nano.llm_client import AutoLLM
+from autocoder_nano.llm_prompt import prompt
+from autocoder_nano.llm_types import AutoCoderArgs, SourceCodeList
+from autocoder_nano.utils.printer_utils import Printer
+
+
+printer = Printer()
+
+
+class AutoRulesLearn:
+
+    def __init__(self, args: AutoCoderArgs, llm: AutoLLM):
+        self.args = args
+        self.llm = llm
+
+    @prompt()
+    def _analyze_commit_changes(
+            self, querie_with_urls_and_changes: List[Tuple[str, List[str], Dict[str, Tuple[str, str]]]]
+    ):
+        """
+        Below are the code changes from one of the user's commits:
+        <changes>
+        {% for query,urls,changes in querie_with_urls_and_changes %}
+        ## Original task requirement
+        {{ query }}
+
+        Modified files:
+        {% for url in urls %}
+        - {{ url }}
+        {% endfor %}
+
+        Code changes:
+        {% for file_path, (before, after) in changes.items() %}
+        ##File: {{ file_path }}
+        ##Before the change:
+
+        {{ before or "New file" }}
+
+        ##File: {{ file_path }}
+        ##After the change:
+
+        {{ after or "File deleted" }}
+
+        {% endfor %}
+        {% endfor %}
+        </changes>
+
+        Please analyze the code changes above in depth, extract functional and design patterns with general value, and turn them into code rules (rules) that can be reused in other projects.
+
+        - Identify features and patterns in the code changes that are broadly applicable
+        - Distill them into structured rules that can be quickly reused in other projects
+        - Produce clear usage examples with complete dependencies and invocation details
+
+        Finally, the newly generated file must take this form:
+
+        <example_rules>
+        ---
+        description: [a concise description of the rule, within 20 characters]
+        globs: [file paths the rule applies to, e.g. "src/services/*.py"]
+        alwaysApply: [whether to always apply; usually false]
+        ---
+
+        # [Rule title]
+
+        ## Summary
+        [the rule's function, applicable scenarios, and value, within 100 characters]
+
+        ## Typical usage
+        ```python
+        # A complete code example, including:
+        # 1. Required import statements
+        # 2. Class/function definitions
+        # 3. Parameter descriptions
+        # 4. Invocation details
+        # 5. Key comments
+        ```
+
+        ## Dependencies
+        - [required libraries and their versions]
+        - [environment requirements]
+        - [initialization steps, if any]
+
+        ## Learned from
+        [which part of which commit's changes this feature was extracted from]
+        </example_rules>
+        """
+
+    @prompt()
+    def _analyze_modules(self, sources: SourceCodeList):
+        """
+        Below is the code the user provided for rule extraction:
+        <files>
+        {% for source in sources.sources %}
+        ##File: {{ source.module_name }}
+        {{ source.source_code }}
+        {% endfor %}
+        </files>
+
+        Please analyze the code above in depth, extract functional and design patterns with general value, and turn them into code rules (rules) that can be reused in other projects.
+
+        - Identify features and patterns in the code that are broadly applicable
+        - Distill them into structured rules that can be quickly reused in other projects
+        - Produce clear usage examples with complete dependencies and invocation details
+
+        Finally, the newly generated file must take this form:
+
+        <example_rules>
+        ---
+        description: [a concise description of the rule, within 20 characters]
+        globs: [file paths the rule applies to, e.g. "src/services/*.py"]
+        alwaysApply: [whether to always apply; usually false]
+        ---
+
+        # [Rule title]
+
+        ## Summary
+        [the rule's function, applicable scenarios, and value, within 100 characters]
+
+        ## Typical usage
+        ```python
+        # A complete code example, including:
+        # 1. Required import statements
+        # 2. Class/function definitions
+        # 3. Parameter descriptions
+        # 4. Invocation details
+        # 5. Key comments
+        ```
+
+        ## Dependencies
+        - [required libraries and their versions]
+        - [environment requirements]
+        - [initialization steps, if any]
+
+        ## Learned from
+        [which part of which commit's changes this feature was extracted from]
+        </example_rules>
+        """
+
+    def analyze_commit_changes(
+            self, commit_id: str, conversations=None
+    ) -> str:
+        """ Analyze the code changes of the given commit. """
+        if conversations is None:
+            conversations = []
+
+        changes, _ = get_commit_changes(self.args.source_dir, commit_id)
+
+        if not changes:
+            printer.print_text("No code changes found (commit)", style="yellow")
+            return ""
+
+        try:
+            # Build the prompt content
+            prompt_content = self._analyze_commit_changes.prompt(
+                querie_with_urls_and_changes=changes
+            )
+
+            # Prepare the conversation history
+            if conversations:
+                new_conversations = conversations[:-1]
+            else:
+                new_conversations = []
+            new_conversations.append({"role": "user", "content": prompt_content})
+
+            self.llm.setup_default_model_name(self.args.chat_model)
+            v = self.llm.chat_ai(new_conversations, self.args.chat_model)
+            return v.output
+        except Exception as e:
+            printer.print_text(f"Commit-change analysis failed: {e}", style="red")
+            return ""
+
+    def analyze_modules(
+            self, sources: SourceCodeList, conversations=None
+    ) -> str:
+        """ Analyze the given module files and summarize reusable features based on the user's needs. """
+
+        if conversations is None:
+            conversations = []
+
+        if not sources or not sources.sources:
+            printer.print_text("No valid module files were provided for analysis.", style="red")
+            return ""
+
+        try:
+            # Prepare the prompt
+            prompt_content = self._analyze_modules.prompt(
+                sources=sources
+            )
+
+            # Prepare the conversation history
+            # If conversations are provided, assume the last one is the user's original query and replace it
+            if conversations:
+                new_conversations = conversations[:-1]
+            else:
+                new_conversations = []
+            new_conversations.append({"role": "user", "content": prompt_content})
+
+            self.llm.setup_default_model_name(self.args.chat_model)
+            v = self.llm.chat_ai(new_conversations, self.args.chat_model)
+            return v.output
+        except Exception as e:
+            printer.print_text(f"Module analysis failed: {e}", style="red")
+            return ""
+
+    def _get_index_file_content(self) -> str:
+        """Read the content of the rules index file."""
+        index_file_path = os.path.join(os.path.abspath(self.args.source_dir), ".autocoderrules", "index.md")
+        index_file_content = ""
+
+        try:
+            if os.path.exists(index_file_path):
+                with open(index_file_path, 'r', encoding='utf-8') as f:
+                    index_file_content = f.read()
+        except Exception as e:
+            printer.print_text(f"Error while reading the index file: {str(e)}", style="yellow")
+
+        return index_file_content
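
A sketch of end-to-end use of `AutoRulesLearn`, assuming `args` (an `AutoCoderArgs`) and `llm` (an `AutoLLM`) are configured elsewhere; the commit hash and output file name are placeholders:

```python
learner = AutoRulesLearn(args=args, llm=llm)

# Distill reusable rules from one commit's changes...
rules_md = learner.analyze_commit_changes("abc1234")

# ...or from a set of source files:
# rules_md = learner.analyze_modules(SourceCodeList([...]))

if rules_md:
    # .autocoderrules/ is the directory the class reads its index from;
    # the file name here is a placeholder.
    with open(".autocoderrules/learned.md", "w", encoding="utf-8") as f:
        f.write(rules_md)
```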
autocoder_nano/templates.py
@@ -99,7 +99,7 @@ def init_command_template(source_dir: str):
 ## 2. Find files referenced by the files from levels 0 and 1
 ## 0 is recommended for the first run
 index_filter_level: 0
-index_model_max_input_length: 30000
+index_model_max_input_length: 100000
 
 ## Number of threads for filtering files
 ## If you have a large project, you can increase this number