auto-coder 0.1.396__py3-none-any.whl → 0.1.398__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {auto_coder-0.1.396.dist-info → auto_coder-0.1.398.dist-info}/METADATA +2 -2
- {auto_coder-0.1.396.dist-info → auto_coder-0.1.398.dist-info}/RECORD +31 -12
- autocoder/auto_coder_rag.py +1 -0
- autocoder/chat_auto_coder.py +3 -0
- autocoder/common/conversations/__init__.py +84 -39
- autocoder/common/conversations/backup/__init__.py +14 -0
- autocoder/common/conversations/backup/backup_manager.py +564 -0
- autocoder/common/conversations/backup/restore_manager.py +546 -0
- autocoder/common/conversations/cache/__init__.py +16 -0
- autocoder/common/conversations/cache/base_cache.py +89 -0
- autocoder/common/conversations/cache/cache_manager.py +368 -0
- autocoder/common/conversations/cache/memory_cache.py +224 -0
- autocoder/common/conversations/config.py +195 -0
- autocoder/common/conversations/exceptions.py +72 -0
- autocoder/common/conversations/file_locker.py +145 -0
- autocoder/common/conversations/manager.py +917 -0
- autocoder/common/conversations/models.py +154 -0
- autocoder/common/conversations/search/__init__.py +15 -0
- autocoder/common/conversations/search/filter_manager.py +431 -0
- autocoder/common/conversations/search/text_searcher.py +366 -0
- autocoder/common/conversations/storage/__init__.py +16 -0
- autocoder/common/conversations/storage/base_storage.py +82 -0
- autocoder/common/conversations/storage/file_storage.py +267 -0
- autocoder/common/conversations/storage/index_manager.py +317 -0
- autocoder/common/rag_manager/rag_manager.py +16 -18
- autocoder/rags.py +74 -24
- autocoder/version.py +1 -1
- {auto_coder-0.1.396.dist-info → auto_coder-0.1.398.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.396.dist-info → auto_coder-0.1.398.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.396.dist-info → auto_coder-0.1.398.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.396.dist-info → auto_coder-0.1.398.dist-info}/top_level.txt +0 -0
autocoder/common/conversations/search/text_searcher.py
@@ -0,0 +1,366 @@
+"""
+Text search functionality for conversations and messages.
+
+This module provides comprehensive text search capabilities including
+full-text search, keyword matching, and relevance-based ranking.
+"""
+
+import re
+import math
+from typing import List, Dict, Any, Optional, Tuple, Set
+from collections import Counter, defaultdict
+
+from ..models import Conversation, ConversationMessage
+
+
+class TextSearcher:
+    """Text searcher for conversations and messages with relevance ranking."""
+
+    def __init__(self, case_sensitive: bool = False, stemming: bool = False):
+        """
+        Initialize text searcher.
+
+        Args:
+            case_sensitive: Whether search should be case sensitive
+            stemming: Whether to apply basic stemming (simplified)
+        """
+        self.case_sensitive = case_sensitive
+        self.stemming = stemming
+
+        # Common English stop words for filtering
+        self.stop_words = {
+            'a', 'an', 'and', 'are', 'as', 'at', 'be', 'been', 'by', 'for',
+            'from', 'has', 'he', 'in', 'is', 'it', 'its', 'of', 'on', 'that',
+            'the', 'to', 'was', 'will', 'with', 'would', 'could', 'should',
+            'have', 'had', 'has', 'do', 'does', 'did', 'can', 'may', 'might'
+        }
+
+    def _normalize_text(self, text: str) -> str:
+        """Normalize text for searching."""
+        if not self.case_sensitive:
+            text = text.lower()
+        return text
+
+    def _tokenize(self, text: str) -> List[str]:
+        """Tokenize text into words."""
+        # Simple tokenization - split on word boundaries
+        tokens = re.findall(r'\b\w+\b', text)
+
+        # Normalize tokens
+        tokens = [self._normalize_text(token) for token in tokens]
+
+        # Remove stop words if not case sensitive
+        if not self.case_sensitive:
+            tokens = [token for token in tokens if token not in self.stop_words]
+
+        # Apply basic stemming if enabled
+        if self.stemming:
+            tokens = [self._basic_stem(token) for token in tokens]
+
+        return tokens
+
+    def _basic_stem(self, word: str) -> str:
+        """Apply very basic stemming rules."""
+        # Simple English stemming rules
+        if len(word) <= 3:
+            return word
+
+        # Remove common suffixes
+        suffixes = ['ing', 'ed', 'er', 'est', 'ly', 's']
+        for suffix in suffixes:
+            if word.endswith(suffix) and len(word) > len(suffix) + 2:
+                return word[:-len(suffix)]
+
+        return word
+
+    def _calculate_tf_idf(
+        self,
+        query_terms: List[str],
+        documents: List[Dict[str, Any]]
+    ) -> Dict[str, Dict[str, float]]:
+        """Calculate TF-IDF scores for documents."""
+        # Count documents containing each term
+        doc_count = defaultdict(int)
+        doc_terms = {}
+
+        for i, doc in enumerate(documents):
+            # Combine searchable text from document
+            searchable_text = self._get_searchable_text(doc)
+            terms = self._tokenize(searchable_text)
+            doc_terms[i] = Counter(terms)
+
+            # Count unique terms in this document
+            unique_terms = set(terms)
+            for term in unique_terms:
+                doc_count[term] += 1
+
+        # Calculate TF-IDF scores
+        total_docs = len(documents)
+        tf_idf_scores = {}
+
+        for i, doc in enumerate(documents):
+            tf_idf_scores[i] = {}
+            doc_term_counts = doc_terms[i]
+            doc_length = sum(doc_term_counts.values())
+
+            for term in query_terms:
+                if term in doc_term_counts and doc_length > 0:
+                    # Term frequency
+                    tf = doc_term_counts[term] / doc_length
+
+                    # Inverse document frequency
+                    idf = math.log(total_docs / max(1, doc_count[term]))
+
+                    # TF-IDF score
+                    tf_idf_scores[i][term] = tf * idf
+                else:
+                    tf_idf_scores[i][term] = 0.0
+
+        return tf_idf_scores
+
+    def _get_searchable_text(self, item: Dict[str, Any]) -> str:
+        """Extract searchable text from conversation or message."""
+        if isinstance(item, dict):
+            # Handle different item types
+            text_parts = []
+
+            # Add name and description for conversations
+            if 'name' in item:
+                text_parts.append(item['name'])
+            if 'description' in item:
+                text_parts.append(item.get('description', ''))
+
+            # Add content for messages
+            if 'content' in item:
+                content = item['content']
+                if isinstance(content, str):
+                    text_parts.append(content)
+                elif isinstance(content, dict):
+                    # Extract text from dict content
+                    for value in content.values():
+                        if isinstance(value, str):
+                            text_parts.append(value)
+                elif isinstance(content, list):
+                    # Extract text from list content
+                    for value in content:
+                        if isinstance(value, str):
+                            text_parts.append(value)
+                        elif isinstance(value, dict):
+                            for nested_value in value.values():
+                                if isinstance(nested_value, str):
+                                    text_parts.append(nested_value)
+
+            # Add messages content for conversations
+            if 'messages' in item:
+                for message in item.get('messages', []):
+                    text_parts.append(self._get_searchable_text(message))
+
+            return ' '.join(filter(None, text_parts))
+
+        return str(item)
+
+    def search_conversations(
+        self,
+        query: str,
+        conversations: List[Dict[str, Any]],
+        max_results: Optional[int] = None,
+        min_score: float = 0.0
+    ) -> List[Tuple[Dict[str, Any], float]]:
+        """
+        Search conversations with relevance scoring.
+
+        Args:
+            query: Search query string
+            conversations: List of conversation dictionaries
+            max_results: Maximum number of results to return
+            min_score: Minimum relevance score threshold
+
+        Returns:
+            List of (conversation, score) tuples sorted by relevance
+        """
+        if not query.strip() or not conversations:
+            return [(conv, 0.0) for conv in conversations[:max_results]]
+
+        # Tokenize query
+        query_terms = self._tokenize(query)
+        if not query_terms:
+            return [(conv, 0.0) for conv in conversations[:max_results]]
+
+        # Calculate TF-IDF scores
+        tf_idf_scores = self._calculate_tf_idf(query_terms, conversations)
+
+        # Calculate relevance scores
+        results = []
+        for i, conversation in enumerate(conversations):
+            # Sum TF-IDF scores for all query terms
+            total_score = sum(tf_idf_scores[i].values())
+
+            # Apply boost for exact phrase matches
+            searchable_text = self._get_searchable_text(conversation)
+            normalized_text = self._normalize_text(searchable_text)
+            normalized_query = self._normalize_text(query)
+
+            if normalized_query in normalized_text:
+                total_score *= 1.5  # Boost for exact phrase match
+
+            # Apply boost for title matches
+            if 'name' in conversation:
+                title_text = self._normalize_text(conversation['name'])
+                if any(term in title_text for term in query_terms):
+                    total_score *= 1.2  # Boost for title matches
+
+            if total_score >= min_score:
+                results.append((conversation, total_score))
+
+        # Sort by relevance score (descending)
+        results.sort(key=lambda x: x[1], reverse=True)
+
+        # Apply result limit
+        if max_results:
+            results = results[:max_results]
+
+        return results
+
+    def search_messages(
+        self,
+        query: str,
+        messages: List[Dict[str, Any]],
+        max_results: Optional[int] = None,
+        min_score: float = 0.0
+    ) -> List[Tuple[Dict[str, Any], float]]:
+        """
+        Search messages with relevance scoring.
+
+        Args:
+            query: Search query string
+            messages: List of message dictionaries
+            max_results: Maximum number of results to return
+            min_score: Minimum relevance score threshold
+
+        Returns:
+            List of (message, score) tuples sorted by relevance
+        """
+        if not query.strip() or not messages:
+            return [(msg, 0.0) for msg in messages[:max_results]]
+
+        # Tokenize query
+        query_terms = self._tokenize(query)
+        if not query_terms:
+            return [(msg, 0.0) for msg in messages[:max_results]]
+
+        # Calculate TF-IDF scores
+        tf_idf_scores = self._calculate_tf_idf(query_terms, messages)
+
+        # Calculate relevance scores
+        results = []
+        for i, message in enumerate(messages):
+            # Sum TF-IDF scores for all query terms
+            total_score = sum(tf_idf_scores[i].values())
+
+            # Apply boost for exact phrase matches
+            searchable_text = self._get_searchable_text(message)
+            normalized_text = self._normalize_text(searchable_text)
+            normalized_query = self._normalize_text(query)
+
+            if normalized_query in normalized_text:
+                total_score *= 1.5  # Boost for exact phrase match
+
+            # Apply boost for recent messages (if timestamp available)
+            if 'timestamp' in message:
+                # Simple recency boost - more recent messages get slight boost
+                import time
+                current_time = time.time()
+                message_time = message['timestamp']
+                age_hours = (current_time - message_time) / 3600
+
+                # Boost decreases with age, but not too dramatically
+                recency_boost = max(1.0, 1.1 - (age_hours / (24 * 30)))  # Diminishes over a month
+                total_score *= recency_boost
+
+            if total_score >= min_score:
+                results.append((message, total_score))
+
+        # Sort by relevance score (descending)
+        results.sort(key=lambda x: x[1], reverse=True)
+
+        # Apply result limit
+        if max_results:
+            results = results[:max_results]
+
+        return results
+
+    def highlight_matches(
+        self,
+        text: str,
+        query: str,
+        highlight_format: str = "**{}**"
+    ) -> str:
+        """
+        Highlight query matches in text.
+
+        Args:
+            text: Text to highlight matches in
+            query: Search query
+            highlight_format: Format string for highlighting (e.g., "**{}**" for bold)
+
+        Returns:
+            Text with highlighted matches
+        """
+        if not query.strip():
+            return text
+
+        query_terms = self._tokenize(query)
+        if not query_terms:
+            return text
+
+        # Create regex pattern for all query terms
+        escaped_terms = [re.escape(term) for term in query_terms]
+        pattern = r'\b(' + '|'.join(escaped_terms) + r')\b'
+
+        # Apply highlighting
+        flags = 0 if self.case_sensitive else re.IGNORECASE
+
+        def highlight_replacer(match):
+            return highlight_format.format(match.group(1))
+
+        return re.sub(pattern, highlight_replacer, text, flags=flags)
+
+    def get_search_suggestions(
+        self,
+        partial_query: str,
+        conversations: List[Dict[str, Any]],
+        max_suggestions: int = 5
+    ) -> List[str]:
+        """
+        Get search suggestions based on partial query.
+
+        Args:
+            partial_query: Partial search query
+            conversations: List of conversations to analyze
+            max_suggestions: Maximum number of suggestions
+
+        Returns:
+            List of suggested search terms
+        """
+        if len(partial_query) < 2:
+            return []
+
+        # Extract all terms from conversations
+        all_terms = set()
+        for conversation in conversations:
+            searchable_text = self._get_searchable_text(conversation)
+            terms = self._tokenize(searchable_text)
+            all_terms.update(terms)
+
+        # Find matching terms
+        partial_lower = partial_query.lower()
+        suggestions = []
+
+        for term in all_terms:
+            if term.lower().startswith(partial_lower) and term.lower() != partial_lower:
+                suggestions.append(term)
+
+        # Sort by length (shorter terms first) and alphabetically
+        suggestions.sort(key=lambda x: (len(x), x))
+
+        return suggestions[:max_suggestions]
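As an aside (not part of the diff), a minimal usage sketch of the new TextSearcher: each result's score is the sum over query terms of tf * idf, where tf is the term count divided by document length and idf is log(total documents / documents containing the term), with multiplicative boosts for exact-phrase and title matches. The import path follows the file list above; the sample conversations and query are hypothetical and assume auto-coder 0.1.398 is installed.

# Sketch only: exercising TextSearcher as defined above with made-up data.
from autocoder.common.conversations.search.text_searcher import TextSearcher

conversations = [
    {
        "name": "RAG pipeline tuning",
        "description": "Notes on retrieval settings",
        "messages": [{"content": "Increase the chunk overlap for long documents."}],
    },
    {
        "name": "Release checklist",
        "description": "Steps before publishing a wheel",
        "messages": [{"content": "Run the conversation storage tests first."}],
    },
]

searcher = TextSearcher(case_sensitive=False, stemming=True)

# Rank conversations by summed TF-IDF of the query terms,
# boosted for exact-phrase and title matches.
for conv, score in searcher.search_conversations("chunk overlap", conversations, max_results=5):
    print(f"{score:.3f}  {conv['name']}")

# Wrap matched terms in the default "**{}**" format for display.
print(searcher.highlight_matches("Increase the chunk overlap", "chunk overlap"))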
autocoder/common/conversations/storage/__init__.py
@@ -0,0 +1,16 @@
+"""
+Storage module for conversation management.
+
+This module provides storage functionality for conversations and messages,
+including file-based storage and indexing capabilities.
+"""
+
+from .base_storage import BaseStorage
+from .file_storage import FileStorage
+from .index_manager import IndexManager
+
+__all__ = [
+    'BaseStorage',
+    'FileStorage',
+    'IndexManager'
+]
autocoder/common/conversations/storage/base_storage.py (docstrings translated from Chinese)
@@ -0,0 +1,82 @@
+"""
+Base storage class definitions.
+
+Defines the abstract interface for conversation storage; all storage implementations must inherit from this base class.
+"""
+
+from abc import ABC, abstractmethod
+from typing import Optional, List, Dict, Any
+
+
+class BaseStorage(ABC):
+    """Base storage class defining the abstract interface for conversation storage."""
+
+    @abstractmethod
+    def save_conversation(self, conversation_data: Dict[str, Any]) -> bool:
+        """
+        Save conversation data.
+
+        Args:
+            conversation_data: Conversation data dictionary; must contain a conversation_id
+
+        Returns:
+            bool: True if the save succeeded, False otherwise
+        """
+        pass
+
+    @abstractmethod
+    def load_conversation(self, conversation_id: str) -> Optional[Dict[str, Any]]:
+        """
+        Load conversation data.
+
+        Args:
+            conversation_id: Unique conversation identifier
+
+        Returns:
+            Optional[Dict[str, Any]]: Conversation data dictionary, or None if it does not exist
+        """
+        pass
+
+    @abstractmethod
+    def delete_conversation(self, conversation_id: str) -> bool:
+        """
+        Delete conversation data.
+
+        Args:
+            conversation_id: Unique conversation identifier
+
+        Returns:
+            bool: True if the deletion succeeded, False otherwise
+        """
+        pass
+
+    @abstractmethod
+    def conversation_exists(self, conversation_id: str) -> bool:
+        """
+        Check whether a conversation exists.
+
+        Args:
+            conversation_id: Unique conversation identifier
+
+        Returns:
+            bool: True if the conversation exists, False otherwise
+        """
+        pass
+
+    @abstractmethod
+    def list_conversations(
+        self,
+        limit: Optional[int] = None,
+        offset: int = 0
+    ) -> List[Dict[str, Any]]:
+        """
+        List conversations.
+
+        Args:
+            limit: Maximum number of conversations to return; None means no limit
+            offset: Offset into the result set
+
+        Returns:
+            List[Dict[str, Any]]: List of conversation data dictionaries
+        """
+        pass