auto-coder 0.1.397__py3-none-any.whl → 0.1.399__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of auto-coder might be problematic. Click here for more details.

Files changed (85)
  1. auto_coder-0.1.399.dist-info/METADATA +396 -0
  2. {auto_coder-0.1.397.dist-info → auto_coder-0.1.399.dist-info}/RECORD +81 -28
  3. {auto_coder-0.1.397.dist-info → auto_coder-0.1.399.dist-info}/WHEEL +1 -1
  4. {auto_coder-0.1.397.dist-info → auto_coder-0.1.399.dist-info}/entry_points.txt +2 -0
  5. autocoder/agent/base_agentic/base_agent.py +2 -2
  6. autocoder/agent/base_agentic/tools/replace_in_file_tool_resolver.py +1 -1
  7. autocoder/agent/entry_command_agent/__init__.py +29 -0
  8. autocoder/agent/entry_command_agent/auto_tool.py +61 -0
  9. autocoder/agent/entry_command_agent/chat.py +475 -0
  10. autocoder/agent/entry_command_agent/designer.py +53 -0
  11. autocoder/agent/entry_command_agent/generate_command.py +50 -0
  12. autocoder/agent/entry_command_agent/project_reader.py +58 -0
  13. autocoder/agent/entry_command_agent/voice2text.py +71 -0
  14. autocoder/auto_coder.py +23 -548
  15. autocoder/auto_coder_rag.py +1 -0
  16. autocoder/auto_coder_runner.py +510 -8
  17. autocoder/chat/rules_command.py +1 -1
  18. autocoder/chat_auto_coder.py +8 -0
  19. autocoder/common/ac_style_command_parser/__init__.py +15 -0
  20. autocoder/common/ac_style_command_parser/example.py +7 -0
  21. autocoder/{command_parser.py → common/ac_style_command_parser/parser.py} +1 -33
  22. autocoder/common/ac_style_command_parser/test_parser.py +516 -0
  23. autocoder/common/command_completer_v2.py +1 -1
  24. autocoder/common/command_file_manager/examples.py +22 -8
  25. autocoder/common/command_file_manager/manager.py +37 -6
  26. autocoder/common/conversations/__init__.py +84 -39
  27. autocoder/common/conversations/backup/__init__.py +14 -0
  28. autocoder/common/conversations/backup/backup_manager.py +564 -0
  29. autocoder/common/conversations/backup/restore_manager.py +546 -0
  30. autocoder/common/conversations/cache/__init__.py +16 -0
  31. autocoder/common/conversations/cache/base_cache.py +89 -0
  32. autocoder/common/conversations/cache/cache_manager.py +368 -0
  33. autocoder/common/conversations/cache/memory_cache.py +224 -0
  34. autocoder/common/conversations/config.py +195 -0
  35. autocoder/common/conversations/exceptions.py +72 -0
  36. autocoder/common/conversations/file_locker.py +145 -0
  37. autocoder/common/conversations/get_conversation_manager.py +143 -0
  38. autocoder/common/conversations/manager.py +1028 -0
  39. autocoder/common/conversations/models.py +154 -0
  40. autocoder/common/conversations/search/__init__.py +15 -0
  41. autocoder/common/conversations/search/filter_manager.py +431 -0
  42. autocoder/common/conversations/search/text_searcher.py +366 -0
  43. autocoder/common/conversations/storage/__init__.py +16 -0
  44. autocoder/common/conversations/storage/base_storage.py +82 -0
  45. autocoder/common/conversations/storage/file_storage.py +267 -0
  46. autocoder/common/conversations/storage/index_manager.py +406 -0
  47. autocoder/common/v2/agent/agentic_edit.py +131 -18
  48. autocoder/common/v2/agent/agentic_edit_types.py +10 -0
  49. autocoder/common/v2/code_auto_generate_editblock.py +10 -2
  50. autocoder/dispacher/__init__.py +10 -0
  51. autocoder/rags.py +73 -50
  52. autocoder/run_context.py +1 -0
  53. autocoder/sdk/__init__.py +188 -0
  54. autocoder/sdk/cli/__init__.py +15 -0
  55. autocoder/sdk/cli/__main__.py +26 -0
  56. autocoder/sdk/cli/completion_wrapper.py +38 -0
  57. autocoder/sdk/cli/formatters.py +211 -0
  58. autocoder/sdk/cli/handlers.py +174 -0
  59. autocoder/sdk/cli/install_completion.py +301 -0
  60. autocoder/sdk/cli/main.py +284 -0
  61. autocoder/sdk/cli/options.py +72 -0
  62. autocoder/sdk/constants.py +102 -0
  63. autocoder/sdk/core/__init__.py +20 -0
  64. autocoder/sdk/core/auto_coder_core.py +867 -0
  65. autocoder/sdk/core/bridge.py +497 -0
  66. autocoder/sdk/example.py +0 -0
  67. autocoder/sdk/exceptions.py +72 -0
  68. autocoder/sdk/models/__init__.py +19 -0
  69. autocoder/sdk/models/messages.py +209 -0
  70. autocoder/sdk/models/options.py +194 -0
  71. autocoder/sdk/models/responses.py +311 -0
  72. autocoder/sdk/session/__init__.py +32 -0
  73. autocoder/sdk/session/session.py +106 -0
  74. autocoder/sdk/session/session_manager.py +56 -0
  75. autocoder/sdk/utils/__init__.py +24 -0
  76. autocoder/sdk/utils/formatters.py +216 -0
  77. autocoder/sdk/utils/io_utils.py +302 -0
  78. autocoder/sdk/utils/validators.py +287 -0
  79. autocoder/version.py +2 -1
  80. auto_coder-0.1.397.dist-info/METADATA +0 -111
  81. autocoder/common/conversations/compatibility.py +0 -303
  82. autocoder/common/conversations/conversation_manager.py +0 -502
  83. autocoder/common/conversations/example.py +0 -152
  84. {auto_coder-0.1.397.dist-info → auto_coder-0.1.399.dist-info/licenses}/LICENSE +0 -0
  85. {auto_coder-0.1.397.dist-info → auto_coder-0.1.399.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,366 @@
1
+ """
2
+ Text search functionality for conversations and messages.
3
+
4
+ This module provides comprehensive text search capabilities including
5
+ full-text search, keyword matching, and relevance-based ranking.
6
+ """
7
+
8
+ import re
9
+ import math
10
+ from typing import List, Dict, Any, Optional, Tuple, Set
11
+ from collections import Counter, defaultdict
12
+
13
+ from ..models import Conversation, ConversationMessage
14
+
15
+
16
class TextSearcher:
    """Text searcher for conversations and messages with relevance ranking.

    Relevance is the sum of per-term TF-IDF scores over the query tokens,
    with heuristic multiplicative boosts: an exact-phrase match x1.5, a
    conversation-title match x1.2, and (for messages carrying a
    ``timestamp``) a mild recency boost that decays over roughly a month.
    """

    def __init__(self, case_sensitive: bool = False, stemming: bool = False):
        """
        Initialize text searcher.

        Args:
            case_sensitive: Whether search should be case sensitive
            stemming: Whether to apply basic suffix-stripping stemming
        """
        self.case_sensitive = case_sensitive
        self.stemming = stemming

        # Common English stop words filtered out during tokenization
        # (only in case-insensitive mode; see _tokenize).
        self.stop_words = {
            'a', 'an', 'and', 'are', 'as', 'at', 'be', 'been', 'by', 'for',
            'from', 'has', 'he', 'in', 'is', 'it', 'its', 'of', 'on', 'that',
            'the', 'to', 'was', 'will', 'with', 'would', 'could', 'should',
            'have', 'had', 'do', 'does', 'did', 'can', 'may', 'might'
        }

    def _normalize_text(self, text: str) -> str:
        """Lower-case *text* unless the searcher is case sensitive."""
        if not self.case_sensitive:
            text = text.lower()
        return text

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize text into normalized word tokens.

        Splits on word boundaries, normalizes case, drops stop words
        (case-insensitive mode only, since the stop list is lower-case)
        and optionally applies basic stemming.
        """
        tokens = re.findall(r'\b\w+\b', text)
        tokens = [self._normalize_text(token) for token in tokens]

        if not self.case_sensitive:
            tokens = [token for token in tokens if token not in self.stop_words]

        if self.stemming:
            tokens = [self._basic_stem(token) for token in tokens]

        return tokens

    def _basic_stem(self, word: str) -> str:
        """Apply very basic suffix-stripping stemming rules.

        Strips the first matching common English suffix, keeping at
        least three characters of the stem; short words pass through.
        """
        if len(word) <= 3:
            return word

        suffixes = ['ing', 'ed', 'er', 'est', 'ly', 's']
        for suffix in suffixes:
            if word.endswith(suffix) and len(word) > len(suffix) + 2:
                return word[:-len(suffix)]

        return word

    def _calculate_tf_idf(
        self,
        query_terms: List[str],
        documents: List[Dict[str, Any]]
    ) -> Dict[int, Dict[str, float]]:
        """Calculate per-document TF-IDF scores for the query terms.

        Args:
            query_terms: Tokenized query terms.
            documents: Conversation/message dicts to score.

        Returns:
            Mapping of document index -> {term: tf-idf score}. Terms
            absent from a document score 0.0.

        Note: the original annotated the outer key as ``str`` but the
        keys are list indices — corrected to ``int``.
        """
        doc_count = defaultdict(int)  # number of documents containing each term
        doc_terms = {}                # index -> Counter of term frequencies

        for i, doc in enumerate(documents):
            terms = self._tokenize(self._get_searchable_text(doc))
            doc_terms[i] = Counter(terms)
            # Count each term once per document for the IDF denominator.
            for term in set(terms):
                doc_count[term] += 1

        total_docs = len(documents)
        tf_idf_scores = {}

        for i in range(len(documents)):
            tf_idf_scores[i] = {}
            doc_term_counts = doc_terms[i]
            doc_length = sum(doc_term_counts.values())

            for term in query_terms:
                if term in doc_term_counts and doc_length > 0:
                    tf = doc_term_counts[term] / doc_length
                    # max(1, ...) guards the division for unseen terms.
                    idf = math.log(total_docs / max(1, doc_count[term]))
                    tf_idf_scores[i][term] = tf * idf
                else:
                    tf_idf_scores[i][term] = 0.0

        return tf_idf_scores

    def _get_searchable_text(self, item: Dict[str, Any]) -> str:
        """Extract searchable text from a conversation or message dict.

        Concatenates name/description (conversations), string content in
        its str/dict/list forms (messages), and recursively the nested
        ``messages`` of a conversation. Non-dict items are stringified.
        """
        if not isinstance(item, dict):
            return str(item)

        text_parts = []

        # Name and description for conversations.
        if 'name' in item:
            text_parts.append(item['name'])
        if 'description' in item:
            text_parts.append(item.get('description', ''))

        # Content for messages: plain string, dict values, or a list of
        # strings / dicts of strings.
        if 'content' in item:
            content = item['content']
            if isinstance(content, str):
                text_parts.append(content)
            elif isinstance(content, dict):
                for value in content.values():
                    if isinstance(value, str):
                        text_parts.append(value)
            elif isinstance(content, list):
                for value in content:
                    if isinstance(value, str):
                        text_parts.append(value)
                    elif isinstance(value, dict):
                        for nested_value in value.values():
                            if isinstance(nested_value, str):
                                text_parts.append(nested_value)

        # Nested messages of a conversation, recursively.
        if 'messages' in item:
            for message in item.get('messages', []):
                text_parts.append(self._get_searchable_text(message))

        # filter(None, ...) drops empty strings and None parts.
        return ' '.join(filter(None, text_parts))

    def search_conversations(
        self,
        query: str,
        conversations: List[Dict[str, Any]],
        max_results: Optional[int] = None,
        min_score: float = 0.0
    ) -> List[Tuple[Dict[str, Any], float]]:
        """
        Search conversations with relevance scoring.

        Args:
            query: Search query string
            conversations: List of conversation dictionaries
            max_results: Maximum number of results to return (None = all)
            min_score: Minimum relevance score threshold

        Returns:
            List of (conversation, score) tuples sorted by relevance
        """
        # Empty/stop-word-only queries return the (truncated) input
        # unranked rather than raising.
        if not query.strip() or not conversations:
            return [(conv, 0.0) for conv in conversations[:max_results]]

        query_terms = self._tokenize(query)
        if not query_terms:
            return [(conv, 0.0) for conv in conversations[:max_results]]

        tf_idf_scores = self._calculate_tf_idf(query_terms, conversations)

        # Loop invariant: normalize the query once, not per document.
        normalized_query = self._normalize_text(query)

        results = []
        for i, conversation in enumerate(conversations):
            total_score = sum(tf_idf_scores[i].values())

            normalized_text = self._normalize_text(
                self._get_searchable_text(conversation))
            if normalized_query in normalized_text:
                total_score *= 1.5  # boost for exact phrase match

            # Guarded with truthiness so a None/empty name cannot crash
            # _normalize_text (the original indexed it unconditionally).
            title = conversation.get('name')
            if title:
                title_text = self._normalize_text(title)
                if any(term in title_text for term in query_terms):
                    total_score *= 1.2  # boost for title matches

            if total_score >= min_score:
                results.append((conversation, total_score))

        results.sort(key=lambda pair: pair[1], reverse=True)

        # `is not None` so an explicit limit of 0 is honored (the former
        # falsy check silently returned everything for max_results=0).
        if max_results is not None:
            results = results[:max_results]

        return results

    def search_messages(
        self,
        query: str,
        messages: List[Dict[str, Any]],
        max_results: Optional[int] = None,
        min_score: float = 0.0
    ) -> List[Tuple[Dict[str, Any], float]]:
        """
        Search messages with relevance scoring.

        Args:
            query: Search query string
            messages: List of message dictionaries
            max_results: Maximum number of results to return (None = all)
            min_score: Minimum relevance score threshold

        Returns:
            List of (message, score) tuples sorted by relevance
        """
        import time  # moved out of the per-message loop body

        if not query.strip() or not messages:
            return [(msg, 0.0) for msg in messages[:max_results]]

        query_terms = self._tokenize(query)
        if not query_terms:
            return [(msg, 0.0) for msg in messages[:max_results]]

        tf_idf_scores = self._calculate_tf_idf(query_terms, messages)

        # Loop invariants hoisted: one normalization and one clock read
        # for the whole batch (formerly recomputed per message).
        normalized_query = self._normalize_text(query)
        current_time = time.time()

        results = []
        for i, message in enumerate(messages):
            total_score = sum(tf_idf_scores[i].values())

            normalized_text = self._normalize_text(
                self._get_searchable_text(message))
            if normalized_query in normalized_text:
                total_score *= 1.5  # boost for exact phrase match

            # Recency boost: assumes 'timestamp' is epoch seconds — the
            # diminishing boost fades to 1.0 over roughly a month.
            if 'timestamp' in message:
                age_hours = (current_time - message['timestamp']) / 3600
                recency_boost = max(1.0, 1.1 - (age_hours / (24 * 30)))
                total_score *= recency_boost

            if total_score >= min_score:
                results.append((message, total_score))

        results.sort(key=lambda pair: pair[1], reverse=True)

        # See search_conversations: explicit limit of 0 is honored.
        if max_results is not None:
            results = results[:max_results]

        return results

    def highlight_matches(
        self,
        text: str,
        query: str,
        highlight_format: str = "**{}**"
    ) -> str:
        """
        Highlight query matches in text.

        Args:
            text: Text to highlight matches in
            query: Search query
            highlight_format: Format string for highlighting (e.g. "**{}**")

        Returns:
            Text with highlighted matches

        Note: with stemming enabled the pattern is built from stemmed
        query tokens, so inflected forms in *text* may not be matched.
        """
        if not query.strip():
            return text

        query_terms = self._tokenize(query)
        if not query_terms:
            return text

        # One alternation of all (escaped) terms, whole words only.
        escaped_terms = [re.escape(term) for term in query_terms]
        pattern = r'\b(' + '|'.join(escaped_terms) + r')\b'
        flags = 0 if self.case_sensitive else re.IGNORECASE

        return re.sub(
            pattern,
            lambda match: highlight_format.format(match.group(1)),
            text,
            flags=flags,
        )

    def get_search_suggestions(
        self,
        partial_query: str,
        conversations: List[Dict[str, Any]],
        max_suggestions: int = 5
    ) -> List[str]:
        """
        Get search suggestions based on a partial query.

        Args:
            partial_query: Partial search query (min. 2 characters)
            conversations: List of conversations to analyze
            max_suggestions: Maximum number of suggestions

        Returns:
            List of suggested search terms, shortest first
        """
        if len(partial_query) < 2:
            return []

        # Vocabulary of all tokens occurring in the conversations.
        all_terms = set()
        for conversation in conversations:
            all_terms.update(
                self._tokenize(self._get_searchable_text(conversation)))

        partial_lower = partial_query.lower()
        suggestions = [
            term for term in all_terms
            if term.lower().startswith(partial_lower)
            and term.lower() != partial_lower
        ]

        # Shorter terms first, then alphabetical, for stable ordering.
        suggestions.sort(key=lambda t: (len(t), t))

        return suggestions[:max_suggestions]
@@ -0,0 +1,16 @@
1
+ """
2
+ Storage module for conversation management.
3
+
4
+ This module provides storage functionality for conversations and messages,
5
+ including file-based storage and indexing capabilities.
6
+ """
7
+
8
+ from .base_storage import BaseStorage
9
+ from .file_storage import FileStorage
10
+ from .index_manager import IndexManager
11
+
12
+ __all__ = [
13
+ 'BaseStorage',
14
+ 'FileStorage',
15
+ 'IndexManager'
16
+ ]
@@ -0,0 +1,82 @@
1
+ """
2
+ 存储基类定义
3
+
4
+ 定义了对话存储的抽象接口,所有存储实现都必须继承此基类。
5
+ """
6
+
7
+ from abc import ABC, abstractmethod
8
+ from typing import Optional, List, Dict, Any
9
+
10
+
11
class BaseStorage(ABC):
    """Abstract base class defining the conversation storage interface.

    All concrete storage implementations must inherit from this class
    and implement every abstract method below.
    """

    @abstractmethod
    def save_conversation(self, conversation_data: Dict[str, Any]) -> bool:
        """
        Persist conversation data.

        Args:
            conversation_data: Conversation data dict; must contain a
                ``conversation_id`` key.

        Returns:
            bool: True on success, False on failure.
        """
        pass

    @abstractmethod
    def load_conversation(self, conversation_id: str) -> Optional[Dict[str, Any]]:
        """
        Load conversation data.

        Args:
            conversation_id: Unique conversation identifier.

        Returns:
            Optional[Dict[str, Any]]: The conversation data dict, or None
            if the conversation does not exist.
        """
        pass

    @abstractmethod
    def delete_conversation(self, conversation_id: str) -> bool:
        """
        Delete conversation data.

        Args:
            conversation_id: Unique conversation identifier.

        Returns:
            bool: True on success, False on failure.
        """
        pass

    @abstractmethod
    def conversation_exists(self, conversation_id: str) -> bool:
        """
        Check whether a conversation exists.

        Args:
            conversation_id: Unique conversation identifier.

        Returns:
            bool: True if the conversation exists, False otherwise.
        """
        pass

    @abstractmethod
    def list_conversations(
        self,
        limit: Optional[int] = None,
        offset: int = 0
    ) -> List[Dict[str, Any]]:
        """
        List conversations.

        Args:
            limit: Maximum number of conversations to return; None means
                no limit.
            offset: Number of conversations to skip before returning.

        Returns:
            List[Dict[str, Any]]: List of conversation data dicts.
        """
        pass