AstrBot 4.3.5-py3-none-any.whl → 4.5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. astrbot/core/agent/runners/tool_loop_agent_runner.py +31 -2
  2. astrbot/core/astrbot_config_mgr.py +23 -51
  3. astrbot/core/config/default.py +92 -12
  4. astrbot/core/conversation_mgr.py +36 -1
  5. astrbot/core/core_lifecycle.py +24 -5
  6. astrbot/core/db/migration/migra_45_to_46.py +44 -0
  7. astrbot/core/db/vec_db/base.py +33 -2
  8. astrbot/core/db/vec_db/faiss_impl/document_storage.py +310 -52
  9. astrbot/core/db/vec_db/faiss_impl/embedding_storage.py +31 -3
  10. astrbot/core/db/vec_db/faiss_impl/vec_db.py +81 -23
  11. astrbot/core/file_token_service.py +6 -1
  12. astrbot/core/initial_loader.py +6 -3
  13. astrbot/core/knowledge_base/chunking/__init__.py +11 -0
  14. astrbot/core/knowledge_base/chunking/base.py +24 -0
  15. astrbot/core/knowledge_base/chunking/fixed_size.py +57 -0
  16. astrbot/core/knowledge_base/chunking/recursive.py +155 -0
  17. astrbot/core/knowledge_base/kb_db_sqlite.py +299 -0
  18. astrbot/core/knowledge_base/kb_helper.py +348 -0
  19. astrbot/core/knowledge_base/kb_mgr.py +287 -0
  20. astrbot/core/knowledge_base/models.py +114 -0
  21. astrbot/core/knowledge_base/parsers/__init__.py +15 -0
  22. astrbot/core/knowledge_base/parsers/base.py +50 -0
  23. astrbot/core/knowledge_base/parsers/markitdown_parser.py +25 -0
  24. astrbot/core/knowledge_base/parsers/pdf_parser.py +100 -0
  25. astrbot/core/knowledge_base/parsers/text_parser.py +41 -0
  26. astrbot/core/knowledge_base/parsers/util.py +13 -0
  27. astrbot/core/knowledge_base/retrieval/__init__.py +16 -0
  28. astrbot/core/knowledge_base/retrieval/hit_stopwords.txt +767 -0
  29. astrbot/core/knowledge_base/retrieval/manager.py +273 -0
  30. astrbot/core/knowledge_base/retrieval/rank_fusion.py +138 -0
  31. astrbot/core/knowledge_base/retrieval/sparse_retriever.py +130 -0
  32. astrbot/core/pipeline/process_stage/method/llm_request.py +29 -7
  33. astrbot/core/pipeline/process_stage/utils.py +80 -0
  34. astrbot/core/platform/astr_message_event.py +8 -7
  35. astrbot/core/platform/sources/misskey/misskey_adapter.py +380 -44
  36. astrbot/core/platform/sources/misskey/misskey_api.py +581 -45
  37. astrbot/core/platform/sources/misskey/misskey_event.py +76 -41
  38. astrbot/core/platform/sources/misskey/misskey_utils.py +254 -43
  39. astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_server.py +2 -1
  40. astrbot/core/platform/sources/satori/satori_adapter.py +27 -1
  41. astrbot/core/platform/sources/satori/satori_event.py +270 -99
  42. astrbot/core/provider/manager.py +14 -9
  43. astrbot/core/provider/provider.py +67 -0
  44. astrbot/core/provider/sources/anthropic_source.py +4 -4
  45. astrbot/core/provider/sources/dashscope_source.py +10 -9
  46. astrbot/core/provider/sources/dify_source.py +6 -8
  47. astrbot/core/provider/sources/gemini_embedding_source.py +1 -2
  48. astrbot/core/provider/sources/openai_embedding_source.py +1 -2
  49. astrbot/core/provider/sources/openai_source.py +18 -15
  50. astrbot/core/provider/sources/openai_tts_api_source.py +1 -1
  51. astrbot/core/star/context.py +3 -0
  52. astrbot/core/star/star.py +6 -0
  53. astrbot/core/star/star_manager.py +13 -7
  54. astrbot/core/umop_config_router.py +81 -0
  55. astrbot/core/updator.py +1 -1
  56. astrbot/core/utils/io.py +23 -12
  57. astrbot/dashboard/routes/__init__.py +2 -0
  58. astrbot/dashboard/routes/config.py +137 -9
  59. astrbot/dashboard/routes/knowledge_base.py +1065 -0
  60. astrbot/dashboard/routes/plugin.py +24 -5
  61. astrbot/dashboard/routes/update.py +1 -1
  62. astrbot/dashboard/server.py +6 -0
  63. astrbot/dashboard/utils.py +161 -0
  64. {astrbot-4.3.5.dist-info → astrbot-4.5.0.dist-info}/METADATA +29 -13
  65. {astrbot-4.3.5.dist-info → astrbot-4.5.0.dist-info}/RECORD +68 -44
  66. {astrbot-4.3.5.dist-info → astrbot-4.5.0.dist-info}/WHEEL +0 -0
  67. {astrbot-4.3.5.dist-info → astrbot-4.5.0.dist-info}/entry_points.txt +0 -0
  68. {astrbot-4.3.5.dist-info → astrbot-4.5.0.dist-info}/licenses/LICENSE +0 -0
astrbot/core/knowledge_base/kb_helper.py (new file)
@@ -0,0 +1,348 @@
+ import uuid
+ import aiofiles
+ import json
+ from pathlib import Path
+ from .models import KnowledgeBase, KBDocument, KBMedia
+ from .kb_db_sqlite import KBSQLiteDatabase
+ from astrbot.core.db.vec_db.base import BaseVecDB
+ from astrbot.core.db.vec_db.faiss_impl.vec_db import FaissVecDB
+ from astrbot.core.provider.provider import EmbeddingProvider, RerankProvider
+ from astrbot.core.provider.manager import ProviderManager
+ from .parsers.util import select_parser
+ from .chunking.base import BaseChunker
+ from astrbot.core import logger
+
+
+ class KBHelper:
+     vec_db: BaseVecDB
+     kb: KnowledgeBase
+
+     def __init__(
+         self,
+         kb_db: KBSQLiteDatabase,
+         kb: KnowledgeBase,
+         provider_manager: ProviderManager,
+         kb_root_dir: str,
+         chunker: BaseChunker,
+     ):
+         self.kb_db = kb_db
+         self.kb = kb
+         self.prov_mgr = provider_manager
+         self.kb_root_dir = kb_root_dir
+         self.chunker = chunker
+
+         self.kb_dir = Path(self.kb_root_dir) / self.kb.kb_id
+         self.kb_medias_dir = Path(self.kb_dir) / "medias" / self.kb.kb_id
+         self.kb_files_dir = Path(self.kb_dir) / "files" / self.kb.kb_id
+
+         self.kb_medias_dir.mkdir(parents=True, exist_ok=True)
+         self.kb_files_dir.mkdir(parents=True, exist_ok=True)
+
+     async def initialize(self):
+         await self._ensure_vec_db()
+
+     async def get_ep(self) -> EmbeddingProvider:
+         if not self.kb.embedding_provider_id:
+             raise ValueError(f"Knowledge base {self.kb.kb_name} has no Embedding Provider configured")
+         ep: EmbeddingProvider = await self.prov_mgr.get_provider_by_id(
+             self.kb.embedding_provider_id
+         )  # type: ignore
+         if not ep:
+             raise ValueError(
+                 f"Could not find an Embedding Provider with ID {self.kb.embedding_provider_id}"
+             )
+         return ep
+
+     async def get_rp(self) -> RerankProvider | None:
+         if not self.kb.rerank_provider_id:
+             return None
+         rp: RerankProvider = await self.prov_mgr.get_provider_by_id(
+             self.kb.rerank_provider_id
+         )  # type: ignore
+         if not rp:
+             raise ValueError(
+                 f"Could not find a Rerank Provider with ID {self.kb.rerank_provider_id}"
+             )
+         return rp
+
+     async def _ensure_vec_db(self) -> FaissVecDB:
+         if not self.kb.embedding_provider_id:
+             raise ValueError(f"Knowledge base {self.kb.kb_name} has no Embedding Provider configured")
+
+         ep = await self.get_ep()
+         rp = await self.get_rp()
+
+         vec_db = FaissVecDB(
+             doc_store_path=str(self.kb_dir / "doc.db"),
+             index_store_path=str(self.kb_dir / "index.faiss"),
+             embedding_provider=ep,
+             rerank_provider=rp,
+         )
+         await vec_db.initialize()
+         self.vec_db = vec_db
+         return vec_db
+
+     async def delete_vec_db(self):
+         """Delete the knowledge base's vector database and all related files"""
+         import shutil
+
+         await self.terminate()
+         if self.kb_dir.exists():
+             shutil.rmtree(self.kb_dir)
+
+     async def terminate(self):
+         if self.vec_db:
+             await self.vec_db.close()
+
+     async def upload_document(
+         self,
+         file_name: str,
+         file_content: bytes,
+         file_type: str,
+         chunk_size: int = 512,
+         chunk_overlap: int = 50,
+         batch_size: int = 32,
+         tasks_limit: int = 3,
+         max_retries: int = 3,
+         progress_callback=None,
+     ) -> KBDocument:
+         """Upload and process a document (with atomicity guarantees and failure cleanup)
+
+         Workflow:
+         1. Save the original file
+         2. Parse the document content
+         3. Extract media assets
+         4. Split into chunks
+         5. Generate embeddings and store them
+         6. Save metadata (transactional)
+         7. Update statistics
+
+         Args:
+             progress_callback: progress callback invoked with (stage, current, total)
+                 - stage: current stage ('parsing', 'chunking', 'embedding')
+                 - current: current progress
+                 - total: total count
+         """
+         await self._ensure_vec_db()
+         doc_id = str(uuid.uuid4())
+         media_paths: list[Path] = []
+
+         # file_path = self.kb_files_dir / f"{doc_id}.{file_type}"
+         # async with aiofiles.open(file_path, "wb") as f:
+         #     await f.write(file_content)
+
+         try:
+             # Stage 1: parse the document
+             if progress_callback:
+                 await progress_callback("parsing", 0, 100)
+
+             parser = await select_parser(f".{file_type}")
+             parse_result = await parser.parse(file_content, file_name)
+             text_content = parse_result.text
+             media_items = parse_result.media
+
+             if progress_callback:
+                 await progress_callback("parsing", 100, 100)
+
+             # Save media files
+             saved_media = []
+             for media_item in media_items:
+                 media = await self._save_media(
+                     doc_id=doc_id,
+                     media_type=media_item.media_type,
+                     file_name=media_item.file_name,
+                     content=media_item.content,
+                     mime_type=media_item.mime_type,
+                 )
+                 saved_media.append(media)
+                 media_paths.append(Path(media.file_path))
+
+             # Stage 2: chunking
+             if progress_callback:
+                 await progress_callback("chunking", 0, 100)
+
+             chunks_text = await self.chunker.chunk(
+                 text_content, chunk_size=chunk_size, chunk_overlap=chunk_overlap
+             )
+             contents = []
+             metadatas = []
+             for idx, chunk_text in enumerate(chunks_text):
+                 contents.append(chunk_text)
+                 metadatas.append(
+                     {
+                         "kb_id": self.kb.kb_id,
+                         "kb_doc_id": doc_id,
+                         "chunk_index": idx,
+                     }
+                 )
+
+             if progress_callback:
+                 await progress_callback("chunking", 100, 100)
+
+             # Stage 3: generate embeddings (with progress callback)
+             async def embedding_progress_callback(current, total):
+                 if progress_callback:
+                     await progress_callback("embedding", current, total)
+
+             await self.vec_db.insert_batch(
+                 contents=contents,
+                 metadatas=metadatas,
+                 batch_size=batch_size,
+                 tasks_limit=tasks_limit,
+                 max_retries=max_retries,
+                 progress_callback=embedding_progress_callback,
+             )
+
+             # Save the document metadata
+             doc = KBDocument(
+                 doc_id=doc_id,
+                 kb_id=self.kb.kb_id,
+                 doc_name=file_name,
+                 file_type=file_type,
+                 file_size=len(file_content),
+                 # file_path=str(file_path),
+                 file_path="",
+                 chunk_count=len(chunks_text),
+                 media_count=0,
+             )
+             async with self.kb_db.get_db() as session:
+                 async with session.begin():
+                     session.add(doc)
+                     for media in saved_media:
+                         session.add(media)
+                     await session.commit()
+
+                 await session.refresh(doc)
+
+             vec_db: FaissVecDB = self.vec_db  # type: ignore
+             await self.kb_db.update_kb_stats(kb_id=self.kb.kb_id, vec_db=vec_db)
+             await self.refresh_kb()
+             await self.refresh_document(doc_id)
+             return doc
+         except Exception as e:
+             logger.error(f"Failed to upload document: {e}")
+             # if file_path.exists():
+             #     file_path.unlink()
+
+             for media_path in media_paths:
+                 try:
+                     if media_path.exists():
+                         media_path.unlink()
+                 except Exception as me:
+                     logger.warning(f"Failed to clean up media file {media_path}: {me}")
+
+             raise e
+
+     async def list_documents(
+         self, offset: int = 0, limit: int = 100
+     ) -> list[KBDocument]:
+         """List all documents in the knowledge base"""
+         docs = await self.kb_db.list_documents_by_kb(self.kb.kb_id, offset, limit)
+         return docs
+
+     async def get_document(self, doc_id: str) -> KBDocument | None:
+         """Get a single document"""
+         doc = await self.kb_db.get_document_by_id(doc_id)
+         return doc
+
+     async def delete_document(self, doc_id: str):
+         """Delete a single document and its related data"""
+         await self.kb_db.delete_document_by_id(
+             doc_id=doc_id,
+             vec_db=self.vec_db,  # type: ignore
+         )
+         await self.kb_db.update_kb_stats(
+             kb_id=self.kb.kb_id,
+             vec_db=self.vec_db,  # type: ignore
+         )
+         await self.refresh_kb()
+
+     async def delete_chunk(self, chunk_id: str, doc_id: str):
+         """Delete a single text chunk and its related data"""
+         vec_db: FaissVecDB = self.vec_db  # type: ignore
+         await vec_db.delete(chunk_id)
+         await self.kb_db.update_kb_stats(
+             kb_id=self.kb.kb_id,
+             vec_db=self.vec_db,  # type: ignore
+         )
+         await self.refresh_kb()
+         await self.refresh_document(doc_id)
+
+     async def refresh_kb(self):
+         if self.kb:
+             kb = await self.kb_db.get_kb_by_id(self.kb.kb_id)
+             if kb:
+                 self.kb = kb
+
+     async def refresh_document(self, doc_id: str) -> None:
+         """Update the document's metadata"""
+         doc = await self.get_document(doc_id)
+         if not doc:
+             raise ValueError(f"Could not find a document with ID {doc_id}")
+         chunk_count = await self.get_chunk_count_by_doc_id(doc_id)
+         doc.chunk_count = chunk_count
+         async with self.kb_db.get_db() as session:
+             async with session.begin():
+                 session.add(doc)
+                 await session.commit()
+             await session.refresh(doc)
+
+     async def get_chunks_by_doc_id(
+         self, doc_id: str, offset: int = 0, limit: int = 100
+     ) -> list[dict]:
+         """Get all chunks of a document together with their metadata"""
+         vec_db: FaissVecDB = self.vec_db  # type: ignore
+         chunks = await vec_db.document_storage.get_documents(
+             metadata_filters={"kb_doc_id": doc_id}, offset=offset, limit=limit
+         )
+         result = []
+         for chunk in chunks:
+             chunk_md = json.loads(chunk["metadata"])
+             result.append(
+                 {
+                     "chunk_id": chunk["doc_id"],
+                     "doc_id": chunk_md["kb_doc_id"],
+                     "kb_id": chunk_md["kb_id"],
+                     "chunk_index": chunk_md["chunk_index"],
+                     "content": chunk["text"],
+                     "char_count": len(chunk["text"]),
+                 }
+             )
+         return result
+
+     async def get_chunk_count_by_doc_id(self, doc_id: str) -> int:
+         """Get the number of chunks in a document"""
+         vec_db: FaissVecDB = self.vec_db  # type: ignore
+         count = await vec_db.count_documents(metadata_filter={"kb_doc_id": doc_id})
+         return count
+
+     async def _save_media(
+         self,
+         doc_id: str,
+         media_type: str,
+         file_name: str,
+         content: bytes,
+         mime_type: str,
+     ) -> KBMedia:
+         """Save a media asset"""
+         media_id = str(uuid.uuid4())
+         ext = Path(file_name).suffix
+
+         # Save the file
+         file_path = self.kb_medias_dir / doc_id / f"{media_id}{ext}"
+         file_path.parent.mkdir(parents=True, exist_ok=True)
+         async with aiofiles.open(file_path, "wb") as f:
+             await f.write(content)
+
+         media = KBMedia(
+             media_id=media_id,
+             doc_id=doc_id,
+             kb_id=self.kb.kb_id,
+             media_type=media_type,
+             file_name=file_name,
+             file_path=str(file_path),
+             file_size=len(content),
+             mime_type=mime_type,
+         )
+
+         return media
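
For orientation, here is a minimal usage sketch (not part of the diff) showing how the upload flow above might be driven. The KBHelper API, its upload_document parameters, and the (stage, current, total) progress-callback shape are taken from the hunk above; the upload_file wrapper name, the helper argument, and the printed progress output are illustrative assumptions.

    from pathlib import Path

    import aiofiles

    from astrbot.core.knowledge_base.kb_helper import KBHelper


    async def upload_file(helper: KBHelper, path: str):
        """Read a local file and feed it through KBHelper.upload_document."""

        async def on_progress(stage: str, current: int, total: int):
            # stage is one of 'parsing', 'chunking', 'embedding' (see docstring above)
            print(f"{stage}: {current}/{total}")

        file = Path(path)
        async with aiofiles.open(file, "rb") as f:
            content = await f.read()

        # chunk_size / chunk_overlap mirror the defaults declared in upload_document
        return await helper.upload_document(
            file_name=file.name,
            file_content=content,
            file_type=file.suffix.lstrip("."),  # e.g. "pdf", "md", "txt"
            chunk_size=512,
            chunk_overlap=50,
            progress_callback=on_progress,
        )

A KBHelper obtained from the KnowledgeBaseManager in the next hunk (e.g. via get_kb or create_kb) can be passed in directly as helper.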
astrbot/core/knowledge_base/kb_mgr.py (new file)
@@ -0,0 +1,287 @@
+ import traceback
+ from pathlib import Path
+ from astrbot.core import logger
+ from astrbot.core.provider.manager import ProviderManager
+
+ from .retrieval.manager import RetrievalManager, RetrievalResult
+ from .retrieval.sparse_retriever import SparseRetriever
+ from .retrieval.rank_fusion import RankFusion
+ from .kb_db_sqlite import KBSQLiteDatabase
+
+ # from .chunking.fixed_size import FixedSizeChunker
+ from .chunking.recursive import RecursiveCharacterChunker
+ from .kb_helper import KBHelper
+
+ from .models import KnowledgeBase
+
+
+ FILES_PATH = "data/knowledge_base"
+ DB_PATH = Path(FILES_PATH) / "kb.db"
+ """Knowledge Base storage root directory"""
+ CHUNKER = RecursiveCharacterChunker()
+
+
+ class KnowledgeBaseManager:
+     kb_db: KBSQLiteDatabase
+     retrieval_manager: RetrievalManager
+
+     def __init__(
+         self,
+         provider_manager: ProviderManager,
+     ):
+         Path(DB_PATH).parent.mkdir(parents=True, exist_ok=True)
+         self.provider_manager = provider_manager
+         self._session_deleted_callback_registered = False
+
+         self.kb_insts: dict[str, KBHelper] = {}
+
+     async def initialize(self):
+         """Initialize the knowledge base module"""
+         try:
+             logger.info("Initializing the knowledge base module...")
+
+             # Initialize the database
+             await self._init_kb_database()
+
+             # Initialize the retrieval manager
+             sparse_retriever = SparseRetriever(self.kb_db)
+             rank_fusion = RankFusion(self.kb_db)
+             self.retrieval_manager = RetrievalManager(
+                 sparse_retriever=sparse_retriever,
+                 rank_fusion=rank_fusion,
+                 kb_db=self.kb_db,
+             )
+             await self.load_kbs()
+
+         except ImportError as e:
+             logger.error(f"Failed to import the knowledge base module: {e}")
+             logger.warning("Please make sure the required dependencies are installed: pypdf, aiofiles, Pillow, rank-bm25")
+         except Exception as e:
+             logger.error(f"Failed to initialize the knowledge base module: {e}")
+             logger.error(traceback.format_exc())
+
+     async def _init_kb_database(self):
+         self.kb_db = KBSQLiteDatabase(DB_PATH.as_posix())
+         await self.kb_db.initialize()
+         await self.kb_db.migrate_to_v1()
+         logger.info(f"KnowledgeBase database initialized: {DB_PATH}")
+
+     async def load_kbs(self):
+         """Load all knowledge base instances"""
+         kb_records = await self.kb_db.list_kbs()
+         for record in kb_records:
+             kb_helper = KBHelper(
+                 kb_db=self.kb_db,
+                 kb=record,
+                 provider_manager=self.provider_manager,
+                 kb_root_dir=FILES_PATH,
+                 chunker=CHUNKER,
+             )
+             await kb_helper.initialize()
+             self.kb_insts[record.kb_id] = kb_helper
+
+     async def create_kb(
+         self,
+         kb_name: str,
+         description: str | None = None,
+         emoji: str | None = None,
+         embedding_provider_id: str | None = None,
+         rerank_provider_id: str | None = None,
+         chunk_size: int | None = None,
+         chunk_overlap: int | None = None,
+         top_k_dense: int | None = None,
+         top_k_sparse: int | None = None,
+         top_m_final: int | None = None,
+     ) -> KBHelper:
+         """Create a new knowledge base instance"""
+         kb = KnowledgeBase(
+             kb_name=kb_name,
+             description=description,
+             emoji=emoji or "📚",
+             embedding_provider_id=embedding_provider_id,
+             rerank_provider_id=rerank_provider_id,
+             chunk_size=chunk_size if chunk_size is not None else 512,
+             chunk_overlap=chunk_overlap if chunk_overlap is not None else 50,
+             top_k_dense=top_k_dense if top_k_dense is not None else 50,
+             top_k_sparse=top_k_sparse if top_k_sparse is not None else 50,
+             top_m_final=top_m_final if top_m_final is not None else 5,
+         )
+         async with self.kb_db.get_db() as session:
+             session.add(kb)
+             await session.commit()
+             await session.refresh(kb)
+
+         kb_helper = KBHelper(
+             kb_db=self.kb_db,
+             kb=kb,
+             provider_manager=self.provider_manager,
+             kb_root_dir=FILES_PATH,
+             chunker=CHUNKER,
+         )
+         await kb_helper.initialize()
+         self.kb_insts[kb.kb_id] = kb_helper
+         return kb_helper
+
+     async def get_kb(self, kb_id: str) -> KBHelper | None:
+         """Get a knowledge base instance"""
+         if kb_id in self.kb_insts:
+             return self.kb_insts[kb_id]
+
+     async def get_kb_by_name(self, kb_name: str) -> KBHelper | None:
+         """Get a knowledge base instance by name"""
+         for kb_helper in self.kb_insts.values():
+             if kb_helper.kb.kb_name == kb_name:
+                 return kb_helper
+         return None
+
+     async def delete_kb(self, kb_id: str) -> bool:
+         """Delete a knowledge base instance"""
+         kb_helper = await self.get_kb(kb_id)
+         if not kb_helper:
+             return False
+
+         await kb_helper.delete_vec_db()
+         async with self.kb_db.get_db() as session:
+             await session.delete(kb_helper.kb)
+             await session.commit()
+
+         self.kb_insts.pop(kb_id, None)
+         return True
+
+     async def list_kbs(self) -> list[KnowledgeBase]:
+         """List all knowledge base instances"""
+         kbs = [kb_helper.kb for kb_helper in self.kb_insts.values()]
+         return kbs
+
+     async def update_kb(
+         self,
+         kb_id: str,
+         kb_name: str,
+         description: str | None = None,
+         emoji: str | None = None,
+         embedding_provider_id: str | None = None,
+         rerank_provider_id: str | None = None,
+         chunk_size: int | None = None,
+         chunk_overlap: int | None = None,
+         top_k_dense: int | None = None,
+         top_k_sparse: int | None = None,
+         top_m_final: int | None = None,
+     ) -> KBHelper | None:
+         """Update a knowledge base instance"""
+         kb_helper = await self.get_kb(kb_id)
+         if not kb_helper:
+             return None
+
+         kb = kb_helper.kb
+         if kb_name is not None:
+             kb.kb_name = kb_name
+         if description is not None:
+             kb.description = description
+         if emoji is not None:
+             kb.emoji = emoji
+         if embedding_provider_id is not None:
+             kb.embedding_provider_id = embedding_provider_id
+         kb.rerank_provider_id = rerank_provider_id  # may be set to None
+         if chunk_size is not None:
+             kb.chunk_size = chunk_size
+         if chunk_overlap is not None:
+             kb.chunk_overlap = chunk_overlap
+         if top_k_dense is not None:
+             kb.top_k_dense = top_k_dense
+         if top_k_sparse is not None:
+             kb.top_k_sparse = top_k_sparse
+         if top_m_final is not None:
+             kb.top_m_final = top_m_final
+         async with self.kb_db.get_db() as session:
+             session.add(kb)
+             await session.commit()
+             await session.refresh(kb)
+
+         return kb_helper
+
+     async def retrieve(
+         self,
+         query: str,
+         kb_names: list[str],
+         top_k_fusion: int = 20,
+         top_m_final: int = 5,
+     ) -> dict | None:
+         """Retrieve relevant content from the specified knowledge bases"""
+         kb_ids = []
+         kb_id_helper_map = {}
+         for kb_name in kb_names:
+             if kb_helper := await self.get_kb_by_name(kb_name):
+                 kb_ids.append(kb_helper.kb.kb_id)
+                 kb_id_helper_map[kb_helper.kb.kb_id] = kb_helper
+
+         if not kb_ids:
+             return {}
+
+         results = await self.retrieval_manager.retrieve(
+             query=query,
+             kb_ids=kb_ids,
+             kb_id_helper_map=kb_id_helper_map,
+             top_k_fusion=top_k_fusion,
+             top_m_final=top_m_final,
+         )
+         if not results:
+             return None
+
+         context_text = self._format_context(results)
+
+         results_dict = [
+             {
+                 "chunk_id": r.chunk_id,
+                 "doc_id": r.doc_id,
+                 "kb_id": r.kb_id,
+                 "kb_name": r.kb_name,
+                 "doc_name": r.doc_name,
+                 "chunk_index": r.metadata.get("chunk_index", 0),
+                 "content": r.content,
+                 "score": r.score,
+                 "char_count": r.metadata.get("char_count", 0),
+             }
+             for r in results
+         ]
+
+         return {
+             "context_text": context_text,
+             "results": results_dict,
+         }
+
+     def _format_context(self, results: list[RetrievalResult]) -> str:
+         """Format the knowledge context
+
+         Args:
+             results: list of retrieval results
+
+         Returns:
+             str: the formatted context text
+         """
+         lines = ["The following is relevant knowledge base content; use it as reference when answering the user's question:\n"]
+
+         for i, result in enumerate(results, 1):
+             lines.append(f"[Knowledge {i}]")
+             lines.append(f"Source: {result.kb_name} / {result.doc_name}")
+             lines.append(f"Content: {result.content}")
+             lines.append(f"Relevance: {result.score:.2f}")
+             lines.append("")
+
+         return "\n".join(lines)
+
+     async def terminate(self):
+         """Terminate all knowledge base instances and close database connections"""
+         for kb_id, kb_helper in self.kb_insts.items():
+             try:
+                 await kb_helper.terminate()
+             except Exception as e:
+                 logger.error(f"Failed to close knowledge base {kb_id}: {e}")
+
+         self.kb_insts.clear()
+
+         # Close the metadata database
+         if hasattr(self, "kb_db") and self.kb_db:
+             try:
+                 await self.kb_db.close()
+             except Exception as e:
+                 logger.error(f"Failed to close the knowledge base metadata database: {e}")