AstrBot 4.3.5__py3-none-any.whl → 4.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astrbot/core/agent/runners/tool_loop_agent_runner.py +31 -2
- astrbot/core/astrbot_config_mgr.py +23 -51
- astrbot/core/config/default.py +92 -12
- astrbot/core/conversation_mgr.py +36 -1
- astrbot/core/core_lifecycle.py +24 -5
- astrbot/core/db/migration/migra_45_to_46.py +44 -0
- astrbot/core/db/vec_db/base.py +33 -2
- astrbot/core/db/vec_db/faiss_impl/document_storage.py +310 -52
- astrbot/core/db/vec_db/faiss_impl/embedding_storage.py +31 -3
- astrbot/core/db/vec_db/faiss_impl/vec_db.py +81 -23
- astrbot/core/file_token_service.py +6 -1
- astrbot/core/initial_loader.py +6 -3
- astrbot/core/knowledge_base/chunking/__init__.py +11 -0
- astrbot/core/knowledge_base/chunking/base.py +24 -0
- astrbot/core/knowledge_base/chunking/fixed_size.py +57 -0
- astrbot/core/knowledge_base/chunking/recursive.py +155 -0
- astrbot/core/knowledge_base/kb_db_sqlite.py +299 -0
- astrbot/core/knowledge_base/kb_helper.py +348 -0
- astrbot/core/knowledge_base/kb_mgr.py +287 -0
- astrbot/core/knowledge_base/models.py +114 -0
- astrbot/core/knowledge_base/parsers/__init__.py +15 -0
- astrbot/core/knowledge_base/parsers/base.py +50 -0
- astrbot/core/knowledge_base/parsers/markitdown_parser.py +25 -0
- astrbot/core/knowledge_base/parsers/pdf_parser.py +100 -0
- astrbot/core/knowledge_base/parsers/text_parser.py +41 -0
- astrbot/core/knowledge_base/parsers/util.py +13 -0
- astrbot/core/knowledge_base/retrieval/__init__.py +16 -0
- astrbot/core/knowledge_base/retrieval/hit_stopwords.txt +767 -0
- astrbot/core/knowledge_base/retrieval/manager.py +273 -0
- astrbot/core/knowledge_base/retrieval/rank_fusion.py +138 -0
- astrbot/core/knowledge_base/retrieval/sparse_retriever.py +130 -0
- astrbot/core/pipeline/process_stage/method/llm_request.py +29 -7
- astrbot/core/pipeline/process_stage/utils.py +80 -0
- astrbot/core/platform/astr_message_event.py +8 -7
- astrbot/core/platform/sources/misskey/misskey_adapter.py +380 -44
- astrbot/core/platform/sources/misskey/misskey_api.py +581 -45
- astrbot/core/platform/sources/misskey/misskey_event.py +76 -41
- astrbot/core/platform/sources/misskey/misskey_utils.py +254 -43
- astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_server.py +2 -1
- astrbot/core/platform/sources/satori/satori_adapter.py +27 -1
- astrbot/core/platform/sources/satori/satori_event.py +270 -99
- astrbot/core/provider/manager.py +14 -9
- astrbot/core/provider/provider.py +67 -0
- astrbot/core/provider/sources/anthropic_source.py +4 -4
- astrbot/core/provider/sources/dashscope_source.py +10 -9
- astrbot/core/provider/sources/dify_source.py +6 -8
- astrbot/core/provider/sources/gemini_embedding_source.py +1 -2
- astrbot/core/provider/sources/openai_embedding_source.py +1 -2
- astrbot/core/provider/sources/openai_source.py +18 -15
- astrbot/core/provider/sources/openai_tts_api_source.py +1 -1
- astrbot/core/star/context.py +3 -0
- astrbot/core/star/star.py +6 -0
- astrbot/core/star/star_manager.py +13 -7
- astrbot/core/umop_config_router.py +81 -0
- astrbot/core/updator.py +1 -1
- astrbot/core/utils/io.py +23 -12
- astrbot/dashboard/routes/__init__.py +2 -0
- astrbot/dashboard/routes/config.py +137 -9
- astrbot/dashboard/routes/knowledge_base.py +1065 -0
- astrbot/dashboard/routes/plugin.py +24 -5
- astrbot/dashboard/routes/update.py +1 -1
- astrbot/dashboard/server.py +6 -0
- astrbot/dashboard/utils.py +161 -0
- {astrbot-4.3.5.dist-info → astrbot-4.5.0.dist-info}/METADATA +29 -13
- {astrbot-4.3.5.dist-info → astrbot-4.5.0.dist-info}/RECORD +68 -44
- {astrbot-4.3.5.dist-info → astrbot-4.5.0.dist-info}/WHEEL +0 -0
- {astrbot-4.3.5.dist-info → astrbot-4.5.0.dist-info}/entry_points.txt +0 -0
- {astrbot-4.3.5.dist-info → astrbot-4.5.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -23,7 +23,12 @@ class FileTokenService:
|
|
|
23
23
|
for token in expired_tokens:
|
|
24
24
|
self.staged_files.pop(token, None)
|
|
25
25
|
|
|
26
|
-
async def
|
|
26
|
+
async def check_token_expired(self, file_token: str) -> bool:
    """Check whether a file token is no longer valid.

    Expired entries are purged first, so a token that timed out is reported
    as expired even if it was never explicitly removed.

    Args:
        file_token: The token to look up.

    Returns:
        bool: True when the token is unknown or has expired.
    """
    async with self.lock:
        await self._cleanup_expired_tokens()
        still_staged = file_token in self.staged_files
        return not still_staged
|
|
30
|
+
|
|
31
|
+
async def register_file(self, file_path: str, timeout: float | None = None) -> str:
|
|
27
32
|
"""向令牌服务注册一个文件。
|
|
28
33
|
|
|
29
34
|
Args:
|
astrbot/core/initial_loader.py
CHANGED
|
@@ -41,10 +41,13 @@ class InitialLoader:
|
|
|
41
41
|
self.dashboard_server = AstrBotDashboard(
|
|
42
42
|
core_lifecycle, self.db, core_lifecycle.dashboard_shutdown_event, webui_dir
|
|
43
43
|
)
|
|
44
|
-
task = asyncio.gather(
|
|
45
|
-
core_task, self.dashboard_server.run()
|
|
46
|
-
) # 启动核心任务和仪表板服务器
|
|
47
44
|
|
|
45
|
+
coro = self.dashboard_server.run()
|
|
46
|
+
if coro:
|
|
47
|
+
# 启动核心任务和仪表板服务器
|
|
48
|
+
task = asyncio.gather(core_task, coro)
|
|
49
|
+
else:
|
|
50
|
+
task = core_task
|
|
48
51
|
try:
|
|
49
52
|
await task # 整个AstrBot在这里运行
|
|
50
53
|
except asyncio.CancelledError:
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
"""文档分块器基类
|
|
2
|
+
|
|
3
|
+
定义了文档分块处理的抽象接口。
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from abc import ABC, abstractmethod
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class BaseChunker(ABC):
    """Abstract base class for document chunkers.

    Concrete chunkers inherit from this class and implement :meth:`chunk`.
    """

    @abstractmethod
    async def chunk(self, text: str, **kwargs) -> list[str]:
        """Split *text* into a list of chunks.

        Args:
            text: The input text to split.

        Returns:
            list[str]: The resulting text chunks.
        """
        ...
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"""固定大小分块器
|
|
2
|
+
|
|
3
|
+
按照固定的字符数将文本分块,支持重叠区域。
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from .base import BaseChunker
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class FixedSizeChunker(BaseChunker):
    """Fixed-size chunker.

    Splits text into chunks of a fixed number of characters, with an
    optional overlap between consecutive chunks.
    """

    def __init__(self, chunk_size: int = 512, chunk_overlap: int = 50):
        """Initialize the chunker.

        Args:
            chunk_size: Maximum size of each chunk, in characters.
            chunk_overlap: Number of overlapping characters between chunks.
        """
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap

    async def chunk(self, text: str, **kwargs) -> list[str]:
        """Split *text* into fixed-size chunks.

        Args:
            text: The input text.
            chunk_size: Optional per-call override of the chunk size.
            chunk_overlap: Optional per-call override of the overlap.

        Returns:
            list[str]: The chunks, in order of appearance.

        Raises:
            ValueError: If the effective chunk size is not positive.
        """
        chunk_size = kwargs.get("chunk_size", self.chunk_size)
        chunk_overlap = kwargs.get("chunk_overlap", self.chunk_overlap)

        # BUG FIX: a non-positive chunk_size made the while-loop below spin
        # forever (the window never advanced past `end`); fail fast instead.
        if chunk_size <= 0:
            raise ValueError(f"chunk_size must be positive, got {chunk_size}")

        chunks = []
        start = 0
        text_len = len(text)

        while start < text_len:
            end = start + chunk_size
            piece = text[start:end]

            if piece:
                chunks.append(piece)

            # Slide the window, keeping the requested overlap.
            start = end - chunk_overlap

            # Guard against a stalled window: when the overlap is as large as
            # the chunk itself (or negative), advance without any overlap.
            if start >= end or chunk_overlap >= chunk_size:
                start = end

        return chunks
|
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
from collections.abc import Callable
|
|
2
|
+
from .base import BaseChunker
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class RecursiveCharacterChunker(BaseChunker):
    """Recursive character-based text splitter.

    Tries a prioritized list of separators; pieces that are still too large
    are split again recursively, and adjacent pieces are merged into chunks
    of at most ``chunk_size`` characters, with ``chunk_overlap`` characters
    of overlap between consecutive chunks.
    """

    def __init__(
        self,
        chunk_size: int = 500,
        chunk_overlap: int = 100,
        length_function: Callable[[str], int] = len,
        is_separator_regex: bool = False,
        separators: list[str] | None = None,
    ):
        """Initialize the recursive character text splitter.

        Args:
            chunk_size: Maximum size of each text chunk.
            chunk_overlap: Overlap between consecutive chunks.
            length_function: Function used to measure text length.
            is_separator_regex: Whether the separators are regular expressions.
            separators: Separators to split on, ordered by priority.
        """
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.length_function = length_function
        self.is_separator_regex = is_separator_regex

        # Default separators, ordered from highest to lowest priority.
        self.separators = separators or [
            "\n\n",  # paragraphs
            "\n",  # line breaks
            "。",  # Chinese sentences
            ",",  # Chinese commas
            ". ",  # sentences
            ", ",  # comma-separated clauses
            " ",  # words
            "",  # individual characters
        ]

    async def chunk(self, text: str, **kwargs) -> list[str]:
        """Recursively split *text* into chunks.

        Args:
            text: The text to split.
            chunk_size: Optional per-call override of the chunk size.
            chunk_overlap: Optional per-call override of the overlap.

        Returns:
            list[str]: The resulting text chunks.
        """
        if not text:
            return []

        overlap = kwargs.get("chunk_overlap", self.chunk_overlap)
        chunk_size = kwargs.get("chunk_size", self.chunk_size)

        text_length = self.length_function(text)
        if text_length <= chunk_size:
            return [text]

        for separator in self.separators:
            if separator == "":
                return self._split_by_character(text, chunk_size, overlap)

            if separator in text:
                splits = text.split(separator)
                # Re-attach the separator to every piece except the last one.
                splits = [s + separator for s in splits[:-1]] + [splits[-1]]
                splits = [s for s in splits if s]
                if len(splits) == 1:
                    continue

                # Merge the pieces back into chunks, recursing when needed.
                final_chunks = []
                current_chunk = []
                current_chunk_length = 0

                for split in splits:
                    split_length = self.length_function(split)

                    # This piece alone exceeds chunk_size: recurse into it.
                    if split_length > chunk_size:
                        # Flush whatever has been accumulated so far.
                        if current_chunk:
                            combined_text = "".join(current_chunk)
                            final_chunks.extend(
                                await self.chunk(
                                    combined_text,
                                    chunk_size=chunk_size,
                                    chunk_overlap=overlap,
                                )
                            )
                            current_chunk = []
                            current_chunk_length = 0

                        # Recursively split the oversized piece.
                        final_chunks.extend(
                            await self.chunk(
                                split, chunk_size=chunk_size, chunk_overlap=overlap
                            )
                        )
                    # Adding this piece would overflow the current chunk.
                    elif current_chunk_length + split_length > chunk_size:
                        # Emit the accumulated chunk.
                        combined_text = "".join(current_chunk)
                        final_chunks.append(combined_text)

                        # Seed the next chunk with the overlap tail.
                        overlap_start = max(0, len(combined_text) - overlap)
                        if overlap_start > 0:
                            overlap_text = combined_text[overlap_start:]
                            current_chunk = [overlap_text, split]
                            current_chunk_length = (
                                self.length_function(overlap_text) + split_length
                            )
                        else:
                            current_chunk = [split]
                            current_chunk_length = split_length
                    else:
                        # The piece fits; keep accumulating.
                        current_chunk.append(split)
                        current_chunk_length += split_length

                # Flush the trailing chunk, if any.
                if current_chunk:
                    final_chunks.append("".join(current_chunk))

                return final_chunks

        return [text]

    def _split_by_character(
        self, text: str, chunk_size: int | None = None, overlap: int | None = None
    ) -> list[str]:
        """Split *text* at the character level.

        Args:
            text: The text to split.
            chunk_size: Chunk size; falls back to the instance default.
            overlap: Overlap size; falls back to the instance default.

        Returns:
            list[str]: The resulting text chunks.
        """
        chunk_size = chunk_size or self.chunk_size
        overlap = overlap or self.chunk_overlap
        # BUG FIX: the original stepped by `chunk_size - overlap` directly.
        # With overlap == chunk_size that step is zero (range() raises
        # ValueError); with overlap > chunk_size it is negative and the range
        # is empty, silently dropping the entire text. Clamp the step to at
        # least one character so progress is always made.
        step = max(1, chunk_size - overlap)
        result = []
        for i in range(0, len(text), step):
            end = min(i + chunk_size, len(text))
            result.append(text[i:end])
            if end == len(text):
                break

        return result
|
|
@@ -0,0 +1,299 @@
|
|
|
1
|
+
from contextlib import asynccontextmanager
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
|
|
4
|
+
from sqlmodel import col, desc
|
|
5
|
+
from sqlalchemy import text, func, select, update, delete
|
|
6
|
+
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
|
|
7
|
+
|
|
8
|
+
from astrbot.core import logger
|
|
9
|
+
from astrbot.core.knowledge_base.models import (
|
|
10
|
+
BaseKBModel,
|
|
11
|
+
KBDocument,
|
|
12
|
+
KBMedia,
|
|
13
|
+
KnowledgeBase,
|
|
14
|
+
)
|
|
15
|
+
from astrbot.core.db.vec_db.faiss_impl import FaissVecDB
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class KBSQLiteDatabase:
    """Async SQLite-backed store for knowledge-base metadata.

    Wraps an aiosqlite engine and a session factory, and exposes query and
    maintenance helpers for knowledge bases, documents and media records.
    """

    def __init__(self, db_path: str = "data/knowledge_base/kb.db") -> None:
        """Initialize the knowledge-base database.

        Args:
            db_path: Database file path; defaults to data/knowledge_base/kb.db.
        """
        self.db_path = db_path
        self.DATABASE_URL = f"sqlite+aiosqlite:///{db_path}"
        self.inited = False

        # Make sure the parent directory exists before the engine touches it.
        Path(db_path).parent.mkdir(parents=True, exist_ok=True)

        # Async engine.
        self.engine = create_async_engine(
            self.DATABASE_URL,
            echo=False,
            pool_pre_ping=True,
            pool_recycle=3600,
        )

        # Session factory.
        self.async_session = async_sessionmaker(
            self.engine,
            class_=AsyncSession,
            expire_on_commit=False,
        )

    @asynccontextmanager
    async def get_db(self):
        """Yield a database session.

        Usage:
            async with kb_db.get_db() as session:
                result = await session.execute(stmt)
        """
        async with self.async_session() as session:
            yield session

    async def initialize(self) -> None:
        """Create the tables and apply SQLite tuning PRAGMAs."""
        async with self.engine.begin() as conn:
            # Create every knowledge-base table.
            await conn.run_sync(BaseKBModel.metadata.create_all)

            # SQLite performance tuning.
            await conn.execute(text("PRAGMA journal_mode=WAL"))
            await conn.execute(text("PRAGMA synchronous=NORMAL"))
            await conn.execute(text("PRAGMA cache_size=20000"))
            await conn.execute(text("PRAGMA temp_store=MEMORY"))
            await conn.execute(text("PRAGMA mmap_size=134217728"))
            await conn.execute(text("PRAGMA optimize"))
            # NOTE: engine.begin() commits on exit; the explicit commit the
            # original issued here was redundant.

        self.inited = True

    async def migrate_to_v1(self) -> None:
        """Run the knowledge-base v1 migration.

        Creates every index needed for query performance. The statements are
        idempotent (IF NOT EXISTS), so re-running the migration is safe.
        """
        index_ddl = [
            # Knowledge-base table indexes.
            "CREATE INDEX IF NOT EXISTS idx_kb_kb_id ON knowledge_bases(kb_id)",
            "CREATE INDEX IF NOT EXISTS idx_kb_name ON knowledge_bases(kb_name)",
            "CREATE INDEX IF NOT EXISTS idx_kb_created_at ON knowledge_bases(created_at)",
            # Document table indexes.
            "CREATE INDEX IF NOT EXISTS idx_doc_doc_id ON kb_documents(doc_id)",
            "CREATE INDEX IF NOT EXISTS idx_doc_kb_id ON kb_documents(kb_id)",
            "CREATE INDEX IF NOT EXISTS idx_doc_name ON kb_documents(doc_name)",
            "CREATE INDEX IF NOT EXISTS idx_doc_type ON kb_documents(file_type)",
            "CREATE INDEX IF NOT EXISTS idx_doc_created_at ON kb_documents(created_at)",
            # Media table indexes.
            "CREATE INDEX IF NOT EXISTS idx_media_media_id ON kb_media(media_id)",
            "CREATE INDEX IF NOT EXISTS idx_media_doc_id ON kb_media(doc_id)",
            "CREATE INDEX IF NOT EXISTS idx_media_kb_id ON kb_media(kb_id)",
            "CREATE INDEX IF NOT EXISTS idx_media_type ON kb_media(media_type)",
        ]
        async with self.get_db() as session:
            session: AsyncSession
            async with session.begin():
                for ddl in index_ddl:
                    await session.execute(text(ddl))
                # session.begin() commits on successful exit.

    async def close(self) -> None:
        """Dispose of the engine and close every pooled connection."""
        await self.engine.dispose()
        logger.info(f"知识库数据库已关闭: {self.db_path}")

    async def get_kb_by_id(self, kb_id: str) -> KnowledgeBase | None:
        """Return the knowledge base with the given ID, or None."""
        async with self.get_db() as session:
            stmt = select(KnowledgeBase).where(col(KnowledgeBase.kb_id) == kb_id)
            result = await session.execute(stmt)
            return result.scalar_one_or_none()

    async def get_kb_by_name(self, kb_name: str) -> KnowledgeBase | None:
        """Return the knowledge base with the given name, or None."""
        async with self.get_db() as session:
            stmt = select(KnowledgeBase).where(col(KnowledgeBase.kb_name) == kb_name)
            result = await session.execute(stmt)
            return result.scalar_one_or_none()

    async def list_kbs(self, offset: int = 0, limit: int = 100) -> list[KnowledgeBase]:
        """List knowledge bases, newest first, with pagination."""
        async with self.get_db() as session:
            stmt = (
                select(KnowledgeBase)
                .offset(offset)
                .limit(limit)
                .order_by(desc(KnowledgeBase.created_at))
            )
            result = await session.execute(stmt)
            return list(result.scalars().all())

    async def count_kbs(self) -> int:
        """Return the total number of knowledge bases."""
        async with self.get_db() as session:
            stmt = select(func.count(col(KnowledgeBase.id)))
            result = await session.execute(stmt)
            return result.scalar() or 0

    # ===== Document queries =====

    async def get_document_by_id(self, doc_id: str) -> KBDocument | None:
        """Return the document with the given ID, or None."""
        async with self.get_db() as session:
            stmt = select(KBDocument).where(col(KBDocument.doc_id) == doc_id)
            result = await session.execute(stmt)
            return result.scalar_one_or_none()

    async def list_documents_by_kb(
        self, kb_id: str, offset: int = 0, limit: int = 100
    ) -> list[KBDocument]:
        """List a knowledge base's documents, newest first, with pagination."""
        async with self.get_db() as session:
            stmt = (
                select(KBDocument)
                .where(col(KBDocument.kb_id) == kb_id)
                .offset(offset)
                .limit(limit)
                .order_by(desc(KBDocument.created_at))
            )
            result = await session.execute(stmt)
            return list(result.scalars().all())

    async def count_documents_by_kb(self, kb_id: str) -> int:
        """Return the number of documents in a knowledge base."""
        async with self.get_db() as session:
            stmt = select(func.count(col(KBDocument.id))).where(
                col(KBDocument.kb_id) == kb_id
            )
            result = await session.execute(stmt)
            return result.scalar() or 0

    async def get_document_with_metadata(self, doc_id: str) -> dict | None:
        """Return a document joined with its owning knowledge base.

        Returns:
            dict | None: ``{"document": KBDocument, "knowledge_base":
            KnowledgeBase}``, or None when the document does not exist.
        """
        async with self.get_db() as session:
            stmt = (
                select(KBDocument, KnowledgeBase)
                .join(KnowledgeBase, col(KBDocument.kb_id) == col(KnowledgeBase.kb_id))
                .where(col(KBDocument.doc_id) == doc_id)
            )
            result = await session.execute(stmt)
            row = result.first()

            if not row:
                return None

            return {
                "document": row[0],
                "knowledge_base": row[1],
            }

    async def delete_document_by_id(self, doc_id: str, vec_db: FaissVecDB):
        """Delete a single document and its associated vector data."""
        # Remove the relational record first.
        async with self.get_db() as session:
            async with session.begin():
                delete_stmt = delete(KBDocument).where(col(KBDocument.doc_id) == doc_id)
                await session.execute(delete_stmt)
                # session.begin() commits on successful exit.

        # Then remove the related vectors from the vector database.
        await vec_db.delete_documents(metadata_filters={"kb_doc_id": doc_id})

    # ===== Media queries =====

    async def list_media_by_doc(self, doc_id: str) -> list[KBMedia]:
        """List every media resource attached to a document."""
        async with self.get_db() as session:
            stmt = select(KBMedia).where(col(KBMedia.doc_id) == doc_id)
            result = await session.execute(stmt)
            return list(result.scalars().all())

    async def get_media_by_id(self, media_id: str) -> KBMedia | None:
        """Return the media resource with the given ID, or None."""
        async with self.get_db() as session:
            stmt = select(KBMedia).where(col(KBMedia.media_id) == media_id)
            result = await session.execute(stmt)
            return result.scalar_one_or_none()

    async def update_kb_stats(self, kb_id: str, vec_db: FaissVecDB) -> None:
        """Refresh a knowledge base's document and chunk counters."""
        chunk_cnt = await vec_db.count_documents()

        async with self.get_db() as session:
            async with session.begin():
                update_stmt = (
                    update(KnowledgeBase)
                    .where(col(KnowledgeBase.kb_id) == kb_id)
                    .values(
                        # Recount documents inside the same transaction.
                        doc_count=select(func.count(col(KBDocument.id)))
                        .where(col(KBDocument.kb_id) == kb_id)
                        .scalar_subquery(),
                        chunk_count=chunk_cnt,
                    )
                )

                await session.execute(update_stmt)
                # session.begin() commits on successful exit.
|