AstrBot 4.3.5__py3-none-any.whl → 4.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astrbot/core/agent/runners/tool_loop_agent_runner.py +31 -2
- astrbot/core/astrbot_config_mgr.py +23 -51
- astrbot/core/config/default.py +132 -12
- astrbot/core/conversation_mgr.py +36 -1
- astrbot/core/core_lifecycle.py +24 -5
- astrbot/core/db/migration/helper.py +6 -3
- astrbot/core/db/migration/migra_45_to_46.py +44 -0
- astrbot/core/db/vec_db/base.py +33 -2
- astrbot/core/db/vec_db/faiss_impl/document_storage.py +310 -52
- astrbot/core/db/vec_db/faiss_impl/embedding_storage.py +31 -3
- astrbot/core/db/vec_db/faiss_impl/vec_db.py +81 -23
- astrbot/core/file_token_service.py +6 -1
- astrbot/core/initial_loader.py +6 -3
- astrbot/core/knowledge_base/chunking/__init__.py +11 -0
- astrbot/core/knowledge_base/chunking/base.py +24 -0
- astrbot/core/knowledge_base/chunking/fixed_size.py +57 -0
- astrbot/core/knowledge_base/chunking/recursive.py +155 -0
- astrbot/core/knowledge_base/kb_db_sqlite.py +299 -0
- astrbot/core/knowledge_base/kb_helper.py +348 -0
- astrbot/core/knowledge_base/kb_mgr.py +287 -0
- astrbot/core/knowledge_base/models.py +114 -0
- astrbot/core/knowledge_base/parsers/__init__.py +15 -0
- astrbot/core/knowledge_base/parsers/base.py +50 -0
- astrbot/core/knowledge_base/parsers/markitdown_parser.py +25 -0
- astrbot/core/knowledge_base/parsers/pdf_parser.py +100 -0
- astrbot/core/knowledge_base/parsers/text_parser.py +41 -0
- astrbot/core/knowledge_base/parsers/util.py +13 -0
- astrbot/core/knowledge_base/retrieval/__init__.py +16 -0
- astrbot/core/knowledge_base/retrieval/hit_stopwords.txt +767 -0
- astrbot/core/knowledge_base/retrieval/manager.py +273 -0
- astrbot/core/knowledge_base/retrieval/rank_fusion.py +138 -0
- astrbot/core/knowledge_base/retrieval/sparse_retriever.py +130 -0
- astrbot/core/pipeline/process_stage/method/llm_request.py +29 -7
- astrbot/core/pipeline/process_stage/utils.py +80 -0
- astrbot/core/platform/astr_message_event.py +8 -7
- astrbot/core/platform/sources/dingtalk/dingtalk_adapter.py +5 -2
- astrbot/core/platform/sources/misskey/misskey_adapter.py +380 -44
- astrbot/core/platform/sources/misskey/misskey_api.py +581 -45
- astrbot/core/platform/sources/misskey/misskey_event.py +76 -41
- astrbot/core/platform/sources/misskey/misskey_utils.py +254 -43
- astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_server.py +2 -1
- astrbot/core/platform/sources/satori/satori_adapter.py +27 -1
- astrbot/core/platform/sources/satori/satori_event.py +270 -99
- astrbot/core/provider/manager.py +22 -9
- astrbot/core/provider/provider.py +67 -0
- astrbot/core/provider/sources/anthropic_source.py +4 -4
- astrbot/core/provider/sources/dashscope_source.py +10 -9
- astrbot/core/provider/sources/dify_source.py +6 -8
- astrbot/core/provider/sources/gemini_embedding_source.py +1 -2
- astrbot/core/provider/sources/openai_embedding_source.py +1 -2
- astrbot/core/provider/sources/openai_source.py +43 -15
- astrbot/core/provider/sources/openai_tts_api_source.py +1 -1
- astrbot/core/provider/sources/xinference_rerank_source.py +108 -0
- astrbot/core/provider/sources/xinference_stt_provider.py +187 -0
- astrbot/core/star/context.py +19 -13
- astrbot/core/star/star.py +6 -0
- astrbot/core/star/star_manager.py +13 -7
- astrbot/core/umop_config_router.py +81 -0
- astrbot/core/updator.py +1 -1
- astrbot/core/utils/io.py +23 -12
- astrbot/dashboard/routes/__init__.py +2 -0
- astrbot/dashboard/routes/config.py +137 -9
- astrbot/dashboard/routes/knowledge_base.py +1065 -0
- astrbot/dashboard/routes/plugin.py +24 -5
- astrbot/dashboard/routes/update.py +1 -1
- astrbot/dashboard/server.py +6 -0
- astrbot/dashboard/utils.py +161 -0
- {astrbot-4.3.5.dist-info → astrbot-4.5.1.dist-info}/METADATA +30 -13
- {astrbot-4.3.5.dist-info → astrbot-4.5.1.dist-info}/RECORD +72 -46
- {astrbot-4.3.5.dist-info → astrbot-4.5.1.dist-info}/WHEEL +0 -0
- {astrbot-4.3.5.dist-info → astrbot-4.5.1.dist-info}/entry_points.txt +0 -0
- {astrbot-4.3.5.dist-info → astrbot-4.5.1.dist-info}/licenses/LICENSE +0 -0
astrbot/core/knowledge_base/kb_mgr.py (new file)
@@ -0,0 +1,287 @@
+import traceback
+from pathlib import Path
+from astrbot.core import logger
+from astrbot.core.provider.manager import ProviderManager
+
+from .retrieval.manager import RetrievalManager, RetrievalResult
+from .retrieval.sparse_retriever import SparseRetriever
+from .retrieval.rank_fusion import RankFusion
+from .kb_db_sqlite import KBSQLiteDatabase
+
+# from .chunking.fixed_size import FixedSizeChunker
+from .chunking.recursive import RecursiveCharacterChunker
+from .kb_helper import KBHelper
+
+from .models import KnowledgeBase
+
+
+FILES_PATH = "data/knowledge_base"
+DB_PATH = Path(FILES_PATH) / "kb.db"
+"""Knowledge Base storage root directory"""
+CHUNKER = RecursiveCharacterChunker()
+
+
+class KnowledgeBaseManager:
+    kb_db: KBSQLiteDatabase
+    retrieval_manager: RetrievalManager
+
+    def __init__(
+        self,
+        provider_manager: ProviderManager,
+    ):
+        Path(DB_PATH).parent.mkdir(parents=True, exist_ok=True)
+        self.provider_manager = provider_manager
+        self._session_deleted_callback_registered = False
+
+        self.kb_insts: dict[str, KBHelper] = {}
+
+    async def initialize(self):
+        """Initialize the knowledge base module."""
+        try:
+            logger.info("正在初始化知识库模块...")
+
+            # Initialize the metadata database
+            await self._init_kb_database()
+
+            # Initialize the retrieval manager
+            sparse_retriever = SparseRetriever(self.kb_db)
+            rank_fusion = RankFusion(self.kb_db)
+            self.retrieval_manager = RetrievalManager(
+                sparse_retriever=sparse_retriever,
+                rank_fusion=rank_fusion,
+                kb_db=self.kb_db,
+            )
+            await self.load_kbs()
+
+        except ImportError as e:
+            logger.error(f"知识库模块导入失败: {e}")
+            logger.warning("请确保已安装所需依赖: pypdf, aiofiles, Pillow, rank-bm25")
+        except Exception as e:
+            logger.error(f"知识库模块初始化失败: {e}")
+            logger.error(traceback.format_exc())
+
+    async def _init_kb_database(self):
+        self.kb_db = KBSQLiteDatabase(DB_PATH.as_posix())
+        await self.kb_db.initialize()
+        await self.kb_db.migrate_to_v1()
+        logger.info(f"KnowledgeBase database initialized: {DB_PATH}")
+
+    async def load_kbs(self):
+        """Load all knowledge base instances."""
+        kb_records = await self.kb_db.list_kbs()
+        for record in kb_records:
+            kb_helper = KBHelper(
+                kb_db=self.kb_db,
+                kb=record,
+                provider_manager=self.provider_manager,
+                kb_root_dir=FILES_PATH,
+                chunker=CHUNKER,
+            )
+            await kb_helper.initialize()
+            self.kb_insts[record.kb_id] = kb_helper
+
+    async def create_kb(
+        self,
+        kb_name: str,
+        description: str | None = None,
+        emoji: str | None = None,
+        embedding_provider_id: str | None = None,
+        rerank_provider_id: str | None = None,
+        chunk_size: int | None = None,
+        chunk_overlap: int | None = None,
+        top_k_dense: int | None = None,
+        top_k_sparse: int | None = None,
+        top_m_final: int | None = None,
+    ) -> KBHelper:
+        """Create a new knowledge base instance."""
+        kb = KnowledgeBase(
+            kb_name=kb_name,
+            description=description,
+            emoji=emoji or "📚",
+            embedding_provider_id=embedding_provider_id,
+            rerank_provider_id=rerank_provider_id,
+            chunk_size=chunk_size if chunk_size is not None else 512,
+            chunk_overlap=chunk_overlap if chunk_overlap is not None else 50,
+            top_k_dense=top_k_dense if top_k_dense is not None else 50,
+            top_k_sparse=top_k_sparse if top_k_sparse is not None else 50,
+            top_m_final=top_m_final if top_m_final is not None else 5,
+        )
+        async with self.kb_db.get_db() as session:
+            session.add(kb)
+            await session.commit()
+            await session.refresh(kb)
+
+        kb_helper = KBHelper(
+            kb_db=self.kb_db,
+            kb=kb,
+            provider_manager=self.provider_manager,
+            kb_root_dir=FILES_PATH,
+            chunker=CHUNKER,
+        )
+        await kb_helper.initialize()
+        self.kb_insts[kb.kb_id] = kb_helper
+        return kb_helper
+
+    async def get_kb(self, kb_id: str) -> KBHelper | None:
+        """Get a knowledge base instance by ID."""
+        if kb_id in self.kb_insts:
+            return self.kb_insts[kb_id]
+
+    async def get_kb_by_name(self, kb_name: str) -> KBHelper | None:
+        """Get a knowledge base instance by name."""
+        for kb_helper in self.kb_insts.values():
+            if kb_helper.kb.kb_name == kb_name:
+                return kb_helper
+        return None
+
+    async def delete_kb(self, kb_id: str) -> bool:
+        """Delete a knowledge base instance."""
+        kb_helper = await self.get_kb(kb_id)
+        if not kb_helper:
+            return False
+
+        await kb_helper.delete_vec_db()
+        async with self.kb_db.get_db() as session:
+            await session.delete(kb_helper.kb)
+            await session.commit()
+
+        self.kb_insts.pop(kb_id, None)
+        return True
+
+    async def list_kbs(self) -> list[KnowledgeBase]:
+        """List all knowledge base instances."""
+        kbs = [kb_helper.kb for kb_helper in self.kb_insts.values()]
+        return kbs
+
+    async def update_kb(
+        self,
+        kb_id: str,
+        kb_name: str,
+        description: str | None = None,
+        emoji: str | None = None,
+        embedding_provider_id: str | None = None,
+        rerank_provider_id: str | None = None,
+        chunk_size: int | None = None,
+        chunk_overlap: int | None = None,
+        top_k_dense: int | None = None,
+        top_k_sparse: int | None = None,
+        top_m_final: int | None = None,
+    ) -> KBHelper | None:
+        """Update a knowledge base instance."""
+        kb_helper = await self.get_kb(kb_id)
+        if not kb_helper:
+            return None
+
+        kb = kb_helper.kb
+        if kb_name is not None:
+            kb.kb_name = kb_name
+        if description is not None:
+            kb.description = description
+        if emoji is not None:
+            kb.emoji = emoji
+        if embedding_provider_id is not None:
+            kb.embedding_provider_id = embedding_provider_id
+        kb.rerank_provider_id = rerank_provider_id  # may be set to None
+        if chunk_size is not None:
+            kb.chunk_size = chunk_size
+        if chunk_overlap is not None:
+            kb.chunk_overlap = chunk_overlap
+        if top_k_dense is not None:
+            kb.top_k_dense = top_k_dense
+        if top_k_sparse is not None:
+            kb.top_k_sparse = top_k_sparse
+        if top_m_final is not None:
+            kb.top_m_final = top_m_final
+        async with self.kb_db.get_db() as session:
+            session.add(kb)
+            await session.commit()
+            await session.refresh(kb)
+
+        return kb_helper
+
+    async def retrieve(
+        self,
+        query: str,
+        kb_names: list[str],
+        top_k_fusion: int = 20,
+        top_m_final: int = 5,
+    ) -> dict | None:
+        """Retrieve relevant content from the named knowledge bases."""
+        kb_ids = []
+        kb_id_helper_map = {}
+        for kb_name in kb_names:
+            if kb_helper := await self.get_kb_by_name(kb_name):
+                kb_ids.append(kb_helper.kb.kb_id)
+                kb_id_helper_map[kb_helper.kb.kb_id] = kb_helper
+
+        if not kb_ids:
+            return {}
+
+        results = await self.retrieval_manager.retrieve(
+            query=query,
+            kb_ids=kb_ids,
+            kb_id_helper_map=kb_id_helper_map,
+            top_k_fusion=top_k_fusion,
+            top_m_final=top_m_final,
+        )
+        if not results:
+            return None
+
+        context_text = self._format_context(results)
+
+        results_dict = [
+            {
+                "chunk_id": r.chunk_id,
+                "doc_id": r.doc_id,
+                "kb_id": r.kb_id,
+                "kb_name": r.kb_name,
+                "doc_name": r.doc_name,
+                "chunk_index": r.metadata.get("chunk_index", 0),
+                "content": r.content,
+                "score": r.score,
+                "char_count": r.metadata.get("char_count", 0),
+            }
+            for r in results
+        ]
+
+        return {
+            "context_text": context_text,
+            "results": results_dict,
+        }
+
+    def _format_context(self, results: list[RetrievalResult]) -> str:
+        """Format the retrieved knowledge as context text.
+
+        Args:
+            results: list of retrieval results
+
+        Returns:
+            str: the formatted context text
+        """
+        lines = ["以下是相关的知识库内容,请参考这些信息回答用户的问题:\n"]
+
+        for i, result in enumerate(results, 1):
+            lines.append(f"【知识 {i}】")
+            lines.append(f"来源: {result.kb_name} / {result.doc_name}")
+            lines.append(f"内容: {result.content}")
+            lines.append(f"相关度: {result.score:.2f}")
+            lines.append("")
+
+        return "\n".join(lines)
+
+    async def terminate(self):
+        """Terminate all knowledge base instances and close database connections."""
+        for kb_id, kb_helper in self.kb_insts.items():
+            try:
+                await kb_helper.terminate()
+            except Exception as e:
+                logger.error(f"关闭知识库 {kb_id} 失败: {e}")
+
+        self.kb_insts.clear()
+
+        # Close the metadata database
+        if hasattr(self, "kb_db") and self.kb_db:
+            try:
+                await self.kb_db.close()
+            except Exception as e:
+                logger.error(f"关闭知识库元数据数据库失败: {e}")
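Taken together, `KnowledgeBaseManager` exposes a small lifecycle: `initialize()` opens the SQLite metadata store and wires up the sparse retriever and rank fusion, `create_kb()` persists a `KnowledgeBase` row and returns a `KBHelper`, and `retrieve()` resolves knowledge-base names to IDs before delegating to the `RetrievalManager`. The following is a minimal usage sketch against the API shown above; the `ProviderManager` instance and the standalone entry point are assumptions (inside AstrBot the core lifecycle constructs and owns these objects):

```python
import asyncio

from astrbot.core.provider.manager import ProviderManager
from astrbot.core.knowledge_base.kb_mgr import KnowledgeBaseManager


async def demo(provider_manager: ProviderManager) -> None:
    # Creates data/knowledge_base/kb.db on first use.
    kb_mgr = KnowledgeBaseManager(provider_manager)
    await kb_mgr.initialize()

    # Defaults mirror the values above: 512/50 chunking, 50/50 candidates, top 5 final.
    await kb_mgr.create_kb(kb_name="docs", description="Product documentation")

    # Knowledge bases are addressed by name; no matching KB returns {}.
    hits = await kb_mgr.retrieve(query="How are providers configured?", kb_names=["docs"])
    if hits:
        print(hits["context_text"])

    await kb_mgr.terminate()


# asyncio.run(demo(provider_manager))  # requires a configured ProviderManager
```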
astrbot/core/knowledge_base/models.py (new file)
@@ -0,0 +1,114 @@
+import uuid
+from datetime import datetime, timezone
+
+from sqlmodel import Field, SQLModel, Text, UniqueConstraint, MetaData
+
+
+class BaseKBModel(SQLModel, table=False):
+    metadata = MetaData()
+
+
+class KnowledgeBase(BaseKBModel, table=True):
+    """Knowledge base table
+
+    Stores the basic information and statistics of a knowledge base.
+    """
+
+    __tablename__ = "knowledge_bases"  # type: ignore
+
+    id: int | None = Field(
+        primary_key=True, sa_column_kwargs={"autoincrement": True}, default=None
+    )
+    kb_id: str = Field(
+        max_length=36,
+        nullable=False,
+        unique=True,
+        default_factory=lambda: str(uuid.uuid4()),
+        index=True,
+    )
+    kb_name: str = Field(max_length=100, nullable=False)
+    description: str | None = Field(default=None, sa_type=Text)
+    emoji: str | None = Field(default="📚", max_length=10)
+    embedding_provider_id: str | None = Field(default=None, max_length=100)
+    rerank_provider_id: str | None = Field(default=None, max_length=100)
+    # Chunking configuration
+    chunk_size: int | None = Field(default=512, nullable=True)
+    chunk_overlap: int | None = Field(default=50, nullable=True)
+    # Retrieval configuration
+    top_k_dense: int | None = Field(default=50, nullable=True)
+    top_k_sparse: int | None = Field(default=50, nullable=True)
+    top_m_final: int | None = Field(default=5, nullable=True)
+    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+    updated_at: datetime = Field(
+        default_factory=lambda: datetime.now(timezone.utc),
+        sa_column_kwargs={"onupdate": datetime.now(timezone.utc)},
+    )
+    doc_count: int = Field(default=0, nullable=False)
+    chunk_count: int = Field(default=0, nullable=False)
+
+    __table_args__ = (
+        UniqueConstraint(
+            "kb_name",
+            name="uix_kb_name",
+        ),
+    )
+
+
+class KBDocument(BaseKBModel, table=True):
+    """Document table
+
+    Stores metadata for documents uploaded to a knowledge base.
+    """
+
+    __tablename__ = "kb_documents"  # type: ignore
+
+    id: int | None = Field(
+        primary_key=True, sa_column_kwargs={"autoincrement": True}, default=None
+    )
+    doc_id: str = Field(
+        max_length=36,
+        nullable=False,
+        unique=True,
+        default_factory=lambda: str(uuid.uuid4()),
+        index=True,
+    )
+    kb_id: str = Field(max_length=36, nullable=False, index=True)
+    doc_name: str = Field(max_length=255, nullable=False)
+    file_type: str = Field(max_length=20, nullable=False)
+    file_size: int = Field(nullable=False)
+    file_path: str = Field(max_length=512, nullable=False)
+    chunk_count: int = Field(default=0, nullable=False)
+    media_count: int = Field(default=0, nullable=False)
+    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+    updated_at: datetime = Field(
+        default_factory=lambda: datetime.now(timezone.utc),
+        sa_column_kwargs={"onupdate": datetime.now(timezone.utc)},
+    )
+
+
+class KBMedia(BaseKBModel, table=True):
+    """Media table
+
+    Stores images, videos, and other media extracted from documents.
+    """
+
+    __tablename__ = "kb_media"  # type: ignore
+
+    id: int | None = Field(
+        primary_key=True, sa_column_kwargs={"autoincrement": True}, default=None
+    )
+    media_id: str = Field(
+        max_length=36,
+        nullable=False,
+        unique=True,
+        default_factory=lambda: str(uuid.uuid4()),
+        index=True,
+    )
+    doc_id: str = Field(max_length=36, nullable=False, index=True)
+    kb_id: str = Field(max_length=36, nullable=False, index=True)
+    media_type: str = Field(max_length=20, nullable=False)
+    file_name: str = Field(max_length=255, nullable=False)
+    file_path: str = Field(max_length=512, nullable=False)
+    file_size: int = Field(nullable=False)
+    mime_type: str = Field(max_length=100, nullable=False)
+    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
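The models inherit from `BaseKBModel`, which carries its own `MetaData` object, so the knowledge-base tables are registered separately from AstrBot's main schema. Table creation is handled by the packaged `KBSQLiteDatabase` (via `initialize()`/`migrate_to_v1()` shown earlier); purely as an illustrative sketch of what the separate metadata enables, it could be bound to its own async SQLite engine like this (assumes `aiosqlite` is installed; `create_kb_schema` is not part of the package):

```python
from sqlalchemy.ext.asyncio import create_async_engine

from astrbot.core.knowledge_base.models import BaseKBModel


async def create_kb_schema(db_path: str) -> None:
    # Only tables registered on BaseKBModel.metadata (knowledge_bases,
    # kb_documents, kb_media) are created; AstrBot's main tables are untouched.
    engine = create_async_engine(f"sqlite+aiosqlite:///{db_path}")
    async with engine.begin() as conn:
        await conn.run_sync(BaseKBModel.metadata.create_all)
    await engine.dispose()
```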
astrbot/core/knowledge_base/parsers/__init__.py (new file)
@@ -0,0 +1,15 @@
+"""
+Document parser module
+"""
+
+from .base import BaseParser, MediaItem, ParseResult
+from .text_parser import TextParser
+from .pdf_parser import PDFParser
+
+__all__ = [
+    "BaseParser",
+    "MediaItem",
+    "ParseResult",
+    "TextParser",
+    "PDFParser",
+]
astrbot/core/knowledge_base/parsers/base.py (new file)
@@ -0,0 +1,50 @@
+"""Document parser base class and data structures
+
+Defines the abstract parser interface and the related data classes.
+"""
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+
+
+@dataclass
+class MediaItem:
+    """Media item
+
+    Represents a media resource extracted from a document.
+    """
+
+    media_type: str  # image, video
+    file_name: str
+    content: bytes
+    mime_type: str
+
+
+@dataclass
+class ParseResult:
+    """Parse result
+
+    Contains the parsed text content and the extracted media resources.
+    """
+
+    text: str
+    media: list[MediaItem]
+
+
+class BaseParser(ABC):
+    """Document parser base class
+
+    All document parsers should inherit from this class and implement parse.
+    """
+
+    @abstractmethod
+    async def parse(self, file_content: bytes, file_name: str) -> ParseResult:
+        """Parse a document
+
+        Args:
+            file_content: raw file content
+            file_name: file name
+
+        Returns:
+            ParseResult: the parse result
+        """
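The parser contract is deliberately small: a parser receives raw bytes plus the file name and returns a `ParseResult` of text and extracted `MediaItem`s. As a hedged sketch of how an extension could satisfy this interface, here is a hypothetical CSV parser (not part of the package):

```python
import csv
import io

from astrbot.core.knowledge_base.parsers.base import BaseParser, ParseResult


class CSVParser(BaseParser):
    """Hypothetical example parser: flattens a CSV file into plain text rows."""

    async def parse(self, file_content: bytes, file_name: str) -> ParseResult:
        text = file_content.decode("utf-8", errors="replace")
        rows = csv.reader(io.StringIO(text))
        flattened = "\n".join(", ".join(row) for row in rows)
        # CSV carries no embedded media, so the media list stays empty.
        return ParseResult(text=flattened, media=[])
```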
astrbot/core/knowledge_base/parsers/markitdown_parser.py (new file)
@@ -0,0 +1,25 @@
+import io
+import os
+
+from astrbot.core.knowledge_base.parsers.base import (
+    BaseParser,
+    ParseResult,
+)
+from markitdown_no_magika import MarkItDown, StreamInfo
+
+
+class MarkitdownParser(BaseParser):
+    """Parses docx, xls, and xlsx formats"""
+
+    async def parse(self, file_content: bytes, file_name: str) -> ParseResult:
+        md = MarkItDown(enable_plugins=False)
+        bio = io.BytesIO(file_content)
+        stream_info = StreamInfo(
+            extension=os.path.splitext(file_name)[1].lower(),
+            filename=file_name,
+        )
+        result = md.convert(bio, stream_info=stream_info)
+        return ParseResult(
+            text=result.markdown,
+            media=[],
+        )
astrbot/core/knowledge_base/parsers/pdf_parser.py (new file)
@@ -0,0 +1,100 @@
+"""PDF file parser
+
+Supports extracting text and image resources from PDF files.
+"""
+
+import io
+
+from pypdf import PdfReader
+
+from astrbot.core.knowledge_base.parsers.base import (
+    BaseParser,
+    MediaItem,
+    ParseResult,
+)
+
+
+class PDFParser(BaseParser):
+    """PDF document parser
+
+    Extracts the text content and the embedded image resources of a PDF.
+    """
+
+    async def parse(self, file_content: bytes, file_name: str) -> ParseResult:
+        """Parse a PDF file
+
+        Args:
+            file_content: raw file content
+            file_name: file name
+
+        Returns:
+            ParseResult: parse result containing text and images
+        """
+        pdf_file = io.BytesIO(file_content)
+        reader = PdfReader(pdf_file)
+
+        text_parts = []
+        media_items = []
+
+        # Extract text
+        for page in reader.pages:
+            text = page.extract_text()
+            if text:
+                text_parts.append(text)
+
+        # Extract images
+        image_counter = 0
+        for page_num, page in enumerate(reader.pages):
+            try:
+                # Safely check Resources
+                if "/Resources" not in page:
+                    continue
+
+                resources = page["/Resources"]
+                if not resources or "/XObject" not in resources:  # type: ignore
+                    continue
+
+                xobjects = resources["/XObject"].get_object()  # type: ignore
+                if not xobjects:
+                    continue
+
+                for obj_name in xobjects:
+                    try:
+                        obj = xobjects[obj_name]
+
+                        if obj.get("/Subtype") != "/Image":
+                            continue
+
+                        # Extract the image data
+                        image_data = obj.get_data()
+
+                        # Determine the format
+                        filter_type = obj.get("/Filter", "")
+                        if filter_type == "/DCTDecode":
+                            ext = "jpg"
+                            mime_type = "image/jpeg"
+                        elif filter_type == "/FlateDecode":
+                            ext = "png"
+                            mime_type = "image/png"
+                        else:
+                            ext = "png"
+                            mime_type = "image/png"
+
+                        image_counter += 1
+                        media_items.append(
+                            MediaItem(
+                                media_type="image",
+                                file_name=f"page_{page_num}_img_{image_counter}.{ext}",
+                                content=image_data,
+                                mime_type=mime_type,
+                            )
+                        )
+                    except Exception:
+                        # A single failed image does not abort the whole parse
+                        continue
+            except Exception:
+                # A failed page does not affect the remaining pages
+                continue
+
+        full_text = "\n\n".join(text_parts)
+        return ParseResult(text=full_text, media=media_items)
astrbot/core/knowledge_base/parsers/text_parser.py (new file)
@@ -0,0 +1,41 @@
+"""Text file parser
+
+Supports parsing TXT and Markdown files.
+"""
+
+from astrbot.core.knowledge_base.parsers.base import BaseParser, ParseResult
+
+
+class TextParser(BaseParser):
+    """TXT/MD text parser
+
+    Supports automatic detection of several character encodings.
+    """
+
+    async def parse(self, file_content: bytes, file_name: str) -> ParseResult:
+        """Parse a text file
+
+        Tries several encodings to decode the file content.
+
+        Args:
+            file_content: raw file content
+            file_name: file name
+
+        Returns:
+            ParseResult: parse result without media resources
+
+        Raises:
+            ValueError: if the file cannot be decoded
+        """
+        # Try several encodings
+        for encoding in ["utf-8", "gbk", "gb2312", "gb18030"]:
+            try:
+                text = file_content.decode(encoding)
+                break
+            except UnicodeDecodeError:
+                continue
+        else:
+            raise ValueError(f"无法解码文件: {file_name}")
+
+        # Text files carry no media resources
+        return ParseResult(text=text, media=[])
astrbot/core/knowledge_base/parsers/util.py (new file)
@@ -0,0 +1,13 @@
+from .base import BaseParser
+
+
+async def select_parser(ext: str) -> BaseParser:
+    if ext in {".md", ".txt", ".markdown", ".xlsx", ".docx", ".xls"}:
+        from .markitdown_parser import MarkitdownParser
+
+        return MarkitdownParser()
+    elif ext == ".pdf":
+        from .pdf_parser import PDFParser
+
+        return PDFParser()
+    raise ValueError(f"暂时不支持的文件格式: {ext}")
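`select_parser` dispatches on the file extension: Office and plain-text formats go through `MarkitdownParser`, PDFs through `PDFParser`, and anything else raises `ValueError`. A small usage sketch (the `parse_file` helper and the sample path are assumptions for illustration):

```python
import asyncio
from pathlib import Path

from astrbot.core.knowledge_base.parsers.util import select_parser


async def parse_file(path: str) -> tuple[str, int]:
    p = Path(path)
    parser = await select_parser(p.suffix.lower())  # MarkitdownParser or PDFParser
    result = await parser.parse(p.read_bytes(), p.name)
    return result.text, len(result.media)


# text, media_count = asyncio.run(parse_file("manual.pdf"))
```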
astrbot/core/knowledge_base/retrieval/__init__.py (new file)
@@ -0,0 +1,16 @@
+"""
+Retrieval module
+"""
+
+from .manager import RetrievalManager, RetrievalResult
+from .sparse_retriever import SparseRetriever, SparseResult
+from .rank_fusion import RankFusion, FusedResult
+
+__all__ = [
+    "RetrievalManager",
+    "RetrievalResult",
+    "SparseRetriever",
+    "SparseResult",
+    "RankFusion",
+    "FusedResult",
+]
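The actual `RankFusion` implementation is not included in this hunk, so the following is only a generic illustration of the technique its name suggests: reciprocal rank fusion over a dense (embedding) ranking and a sparse (BM25) ranking. It is not AstrBot's `RankFusion` API, and the chunk IDs below are made up.

```python
from collections import defaultdict


def reciprocal_rank_fusion(rankings: list[list[str]], k: int = 60) -> list[tuple[str, float]]:
    """Generic RRF: score(d) = sum over rankings of 1 / (k + rank_of_d)."""
    scores: defaultdict[str, float] = defaultdict(float)
    for ranking in rankings:
        for rank, chunk_id in enumerate(ranking, start=1):
            scores[chunk_id] += 1.0 / (k + rank)
    return sorted(scores.items(), key=lambda kv: kv[1], reverse=True)


# Fuse a dense ranking with a sparse ranking of chunk IDs (illustrative data).
dense = ["c3", "c1", "c7"]
sparse = ["c1", "c9", "c3"]
print(reciprocal_rank_fusion([dense, sparse])[:3])
```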