jarvis-ai-assistant 0.1.219__py3-none-any.whl → 0.1.220__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jarvis/__init__.py +1 -1
- jarvis/jarvis_agent/__init__.py +36 -89
- jarvis/jarvis_data/config_schema.json +30 -0
- jarvis/jarvis_platform/yuanbao.py +3 -1
- jarvis/jarvis_rag/__init__.py +11 -0
- jarvis/jarvis_rag/cache.py +87 -0
- jarvis/jarvis_rag/cli.py +297 -0
- jarvis/jarvis_rag/embedding_manager.py +109 -0
- jarvis/jarvis_rag/llm_interface.py +130 -0
- jarvis/jarvis_rag/query_rewriter.py +63 -0
- jarvis/jarvis_rag/rag_pipeline.py +177 -0
- jarvis/jarvis_rag/reranker.py +56 -0
- jarvis/jarvis_rag/retriever.py +201 -0
- jarvis/jarvis_utils/config.py +71 -0
- {jarvis_ai_assistant-0.1.219.dist-info → jarvis_ai_assistant-0.1.220.dist-info}/METADATA +87 -8
- {jarvis_ai_assistant-0.1.219.dist-info → jarvis_ai_assistant-0.1.220.dist-info}/RECORD +20 -11
- {jarvis_ai_assistant-0.1.219.dist-info → jarvis_ai_assistant-0.1.220.dist-info}/entry_points.txt +1 -0
- {jarvis_ai_assistant-0.1.219.dist-info → jarvis_ai_assistant-0.1.220.dist-info}/WHEEL +0 -0
- {jarvis_ai_assistant-0.1.219.dist-info → jarvis_ai_assistant-0.1.220.dist-info}/licenses/LICENSE +0 -0
- {jarvis_ai_assistant-0.1.219.dist-info → jarvis_ai_assistant-0.1.220.dist-info}/top_level.txt +0 -0
jarvis/__init__.py
CHANGED
jarvis/jarvis_agent/__init__.py
CHANGED
@@ -36,94 +36,49 @@ from jarvis.jarvis_utils.tag import ct, ot
 origin_agent_system_prompt = f"""
 <role>
 # 🤖 角色
-
+你是一个专业的任务执行助手,根据用户需求制定并执行详细的计划。
 </role>
 
-<
-#
-1.
-2.
-3.
-
-
-
-
-
-
-- 请求用户确认 → 永久挂起
-</violations>
+<rules>
+# ❗ 核心规则
+1. **单步操作**: 每个响应必须包含且仅包含一个工具调用。
+2. **任务终结**: 当任务完成时,明确指出任务已完成。这是唯一可以不调用工具的例外。
+3. **无响应错误**: 空响应或仅有分析无工具调用的响应是致命错误,会导致系统挂起。
+4. **决策即工具**: 所有的决策和分析都必须通过工具调用来体现。
+5. **等待结果**: 在继续下一步之前,必须等待当前工具的执行结果。
+6. **持续推进**: 除非任务完成,否则必须生成可操作的下一步。
+7. **记录沉淀**: 如果解决方案有普适价值,应记录为方法论。
+8. **用户语言**: 始终使用用户的语言进行交流。
+</rules>
 
 <workflow>
-# 🔄
-1.
-
-
-
-→ 必须调用分析工具
-
-2. 解决方案设计
-- 生成多个可执行的解决方案
-- 评估并选择最优方案
-- 使用PlantUML创建详细行动计划
-→ 必须调用设计工具
-
-3. 执行
-- 一次执行一个步骤
-- 每个步骤只使用一个工具
-- 等待工具结果后再继续
-- 监控结果并根据需要调整
-→ 必须调用执行工具
-
-4. 任务完成
-- 验证目标完成情况
-- 如有价值则记录方法论
+# 🔄 工作流程
+1. **分析**: 理解和分析问题,定义清晰的目标。
+2. **设计**: 设计解决方案并制定详细的行动计划。
+3. **执行**: 按照计划,一次一个步骤地执行。
+4. **完成**: 验证任务是否达成目标,并进行总结。
 </workflow>
 
-<principles>
-# ⚖️ 操作原则
-- 每个步骤一个操作
-- 下一步前必须等待结果
-- 除非任务完成否则必须生成可操作步骤
-- 根据反馈调整计划
-- 记录可复用的解决方案
-- 使用完成命令结束任务
-- 操作之间不能有中间思考状态
-- 所有决策必须表现为工具调用
-</principles>
-
-<rules>
-# ❗ 重要规则
-1. 每个步骤只能使用一个操作
-2. 必须等待操作执行结果
-3. 必须验证任务完成情况
-4. 必须生成可操作步骤
-5. 如果无需操作必须使用完成命令
-6. 永远不要使对话处于等待状态
-7. 始终使用用户语言交流
-8. 必须记录有价值的方法论
-9. 违反操作协议将导致系统崩溃
-10. 空响应会触发永久挂起
-</rules>
-
 <system_info>
-#
-{platform.platform()}
-{
-
-# 当前时间
-{datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
+# 系统信息
+- OS: {platform.platform()} {platform.version()}
+- Time: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
 </system_info>
 """
 
 
 class OutputHandlerProtocol(Protocol):
-    def name(self) -> str:
+    def name(self) -> str:
+        ...
 
-    def can_handle(self, response: str) -> bool:
+    def can_handle(self, response: str) -> bool:
+        ...
 
-    def prompt(self) -> str:
+    def prompt(self) -> str:
+        ...
 
-    def handle(self, response: str, agent: Any) -> Tuple[bool, Any]:
+    def handle(self, response: str, agent: Any) -> Tuple[bool, Any]:
+        ...
 
 
 class Agent:
@@ -189,9 +144,7 @@ class Agent:
         if isinstance(platform, str):
            self.model = PlatformRegistry().create_platform(platform)
            if self.model is None:
-                PrettyOutput.print(
-                    f"平台 {platform} 不存在,将使用普通模型", OutputType.WARNING
-                )
+                PrettyOutput.print(f"平台 {platform} 不存在,将使用普通模型", OutputType.WARNING)
                self.model = PlatformRegistry().get_normal_platform()
         else:
            self.model = platform
@@ -816,18 +769,14 @@ arguments:
 
            if get_interrupt():
                set_interrupt(False)
-                user_input = self.multiline_inputer(
-                    f"模型交互期间被中断,请输入用户干预信息:"
-                )
+                user_input = self.multiline_inputer(f"模型交互期间被中断,请输入用户干预信息:")
                if user_input:
                    # 如果有工具调用且用户确认继续,则将干预信息和工具执行结果拼接为prompt
                    if any(
                        handler.can_handle(current_response)
                        for handler in self.output_handler
                    ):
-                        if user_confirm(
-                            "检测到有工具调用,是否继续处理工具调用?", True
-                        ):
+                        if user_confirm("检测到有工具调用,是否继续处理工具调用?", True):
                            self.prompt = f"{user_input}\n\n{current_response}"
                            continue
                    self.prompt += f"{user_input}"
@@ -873,9 +822,7 @@ arguments:
         if self.use_methodology:
            if not upload_methodology(self.model, other_files=self.files):
                if self.files:
-                    PrettyOutput.print(
-                        "文件上传失败,将忽略文件列表", OutputType.WARNING
-                    )
+                    PrettyOutput.print("文件上传失败,将忽略文件列表", OutputType.WARNING)
                # 上传失败则回退到本地加载
                msg = self.prompt
                for handler in self.input_handler:
@@ -883,14 +830,14 @@ arguments:
                self.prompt = f"{self.prompt}\n\n以下是历史类似问题的执行经验,可参考:\n{load_methodology(msg, self.get_tool_registry())}"
            else:
                if self.files:
-                    self.prompt =
+                    self.prompt = (
+                        f"{self.prompt}\n\n上传的文件包含历史对话信息和方法论文件,可以从中获取一些经验信息。"
+                    )
                else:
                    self.prompt = f"{self.prompt}\n\n上传的文件包含历史对话信息,可以从中获取一些经验信息。"
         elif self.files:
            if not self.model.upload_files(self.files):
-                PrettyOutput.print(
-                    "文件上传失败,将忽略文件列表", OutputType.WARNING
-                )
+                PrettyOutput.print("文件上传失败,将忽略文件列表", OutputType.WARNING)
            else:
                self.prompt = f"{self.prompt}\n\n上传的文件包含历史对话信息,可以从中获取一些经验信息。"
         else:
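The rewrite above collapses the old `<principles>`, `<rules>`, and `<violations>` sections into a single, shorter rule list, and reformats `OutputHandlerProtocol` so that each method body is an explicit `...`. Since `Protocol` relies on structural typing, any class with these four methods satisfies it. Below is a minimal sketch of such a handler; only the four signatures come from the diff, while the class name, the completion marker, and the handling logic are illustrative assumptions.

```python
from typing import Any, Tuple


class CompletionHandler:
    """Hypothetical handler satisfying OutputHandlerProtocol (no inheritance needed)."""

    def name(self) -> str:
        return "completion"

    def can_handle(self, response: str) -> bool:
        # Claim the response when it contains an (assumed) completion marker.
        return "TASK_COMPLETE" in response

    def prompt(self) -> str:
        # Text merged into the agent prompt to advertise this handler.
        return "Output TASK_COMPLETE when the task is finished."

    def handle(self, response: str, agent: Any) -> Tuple[bool, Any]:
        # Returns (stop, result): stop the agent loop and hand back the response.
        return True, response
```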
jarvis/jarvis_data/config_schema.json
CHANGED
@@ -181,6 +181,36 @@
       "description": "是否打印提示",
       "default": false
     },
+    "JARVIS_RAG": {
+      "type": "object",
+      "description": "RAG框架的配置",
+      "properties": {
+        "embedding_mode": {
+          "type": "string",
+          "enum": [
+            "performance",
+            "accuracy"
+          ],
+          "default": "performance",
+          "description": "嵌入模型的模式, 'performance'表示性能优先, 'accuracy'表示准确度优先"
+        },
+        "embedding_cache_path": {
+          "type": "string",
+          "default": ".jarvis/rag/embeddings",
+          "description": "嵌入向量缓存的路径, 相对于当前工作目录"
+        },
+        "vector_db_path": {
+          "type": "string",
+          "default": ".jarvis/rag/vectordb",
+          "description": "向量数据库的持久化存储路径, 相对于当前工作目录"
+        }
+      },
+      "default": {
+        "embedding_mode": "performance",
+        "embedding_cache_path": ".jarvis/rag/embeddings",
+        "vector_db_path": ".jarvis/rag/vectordb"
+      }
+    },
     "JARVIS_REPLACE_MAP": {
       "type": "object",
       "description": "自定义替换映射表配置",
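The new `JARVIS_RAG` object adds three optional settings (an embedding mode plus two storage paths resolved relative to the working directory), each with the default shown in the schema. A hypothetical configuration fragment that conforms to this schema and switches to accuracy mode follows; the surrounding structure of the actual Jarvis config file is not shown in this diff.

```json
{
  "JARVIS_RAG": {
    "embedding_mode": "accuracy",
    "embedding_cache_path": ".jarvis/rag/embeddings",
    "vector_db_path": ".jarvis/rag/vectordb"
  }
}
```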
jarvis/jarvis_platform/yuanbao.py
CHANGED
@@ -38,7 +38,9 @@ class YuanbaoPlatform(BasePlatform):
         self.agent_id = "naQivTmsDa"
 
         if not self.cookies:
-
+            raise ValueError(
+                "YUANBAO_COOKIES environment variable not set. Please provide your cookies to use the Yuanbao platform."
+            )
 
         self.system_message = ""  # 系统消息,用于初始化对话
         self.first_chat = True  # 标识是否为第一次对话
jarvis/jarvis_rag/__init__.py
ADDED
@@ -0,0 +1,11 @@
+"""
+Jarvis RAG Framework
+
+A flexible RAG pipeline with pluggable remote LLMs and local, cache-enabled embedding models.
+"""
+
+from .rag_pipeline import JarvisRAGPipeline
+from .llm_interface import LLMInterface
+from .embedding_manager import EmbeddingManager
+
+__all__ = ["JarvisRAGPipeline", "LLMInterface", "EmbeddingManager"]
jarvis/jarvis_rag/cache.py
ADDED
@@ -0,0 +1,87 @@
+import hashlib
+from typing import List, Optional, Any
+
+from diskcache import Cache
+
+
+class EmbeddingCache:
+    """
+    A disk-based cache for storing and retrieving text embeddings.
+
+    This class uses diskcache to create a persistent, local cache. It generates
+    a key for each text content based on its SHA256 hash, making lookups
+    deterministic and efficient.
+    """
+
+    def __init__(self, cache_dir: str, salt: str = ""):
+        """
+        Initializes the EmbeddingCache.
+
+        Args:
+            cache_dir (str): The directory where the cache will be stored.
+            salt (str): A salt to be added to the hash. This is crucial for
+                        ensuring that embeddings generated by different models
+                        do not collide. For example, use the model name as a salt.
+        """
+        self.cache = Cache(cache_dir)
+        self.salt = salt
+
+    def _get_key(self, text: str) -> str:
+        """Generates a unique cache key for a given text and salt."""
+        hash_object = hashlib.sha256((self.salt + text).encode("utf-8"))
+        return hash_object.hexdigest()
+
+    def get(self, text: str) -> Optional[Any]:
+        """
+        Retrieves an embedding from the cache.
+
+        Args:
+            text (str): The text to look up.
+
+        Returns:
+            The cached embedding, or None if it's not in the cache.
+        """
+        key = self._get_key(text)
+        return self.cache.get(key)
+
+    def set(self, text: str, embedding: Any) -> None:
+        """
+        Stores an embedding in the cache.
+
+        Args:
+            text (str): The text corresponding to the embedding.
+            embedding (Any): The embedding vector to store.
+        """
+        key = self._get_key(text)
+        self.cache.set(key, embedding)
+
+    def get_batch(self, texts: List[str]) -> List[Optional[Any]]:
+        """
+        Retrieves a batch of embeddings from the cache.
+
+        Args:
+            texts (List[str]): A list of texts to look up.
+
+        Returns:
+            A list containing cached embeddings or None for cache misses.
+        """
+        return [self.get(text) for text in texts]
+
+    def set_batch(self, texts: List[str], embeddings: List[Any]) -> None:
+        """
+        Stores a batch of embeddings in the cache.
+
+        Args:
+            texts (List[str]): The list of texts.
+            embeddings (List[Any]): The list of corresponding embeddings.
+        """
+        if len(texts) != len(embeddings):
+            raise ValueError("Length of texts and embeddings must be the same.")
+
+        with self.cache.transact():
+            for text, embedding in zip(texts, embeddings):
+                self.set(text, embedding)
+
+    def close(self):
+        """Closes the cache connection."""
+        self.cache.close()
jarvis/jarvis_rag/cli.py
ADDED
@@ -0,0 +1,297 @@
+import os
+import sys
+from pathlib import Path
+from typing import Optional, List, Literal, cast
+import mimetypes
+
+import typer
+from langchain.docstore.document import Document
+from langchain_community.document_loaders import (
+    TextLoader,
+    UnstructuredMarkdownLoader,
+)
+from langchain_core.document_loaders.base import BaseLoader
+from rich.markdown import Markdown
+
+from jarvis.jarvis_utils.utils import init_env
+
+
+def is_likely_text_file(file_path: Path) -> bool:
+    """
+    Checks if a file is likely to be a text file by reading its beginning.
+    Avoids loading large binary files into memory.
+    """
+    try:
+        # Heuristic 1: Check MIME type if available
+        mime_type, _ = mimetypes.guess_type(file_path)
+        if mime_type and mime_type.startswith("text/"):
+            return True
+        if mime_type and any(x in mime_type for x in ["json", "xml", "javascript"]):
+            return True
+
+        # Heuristic 2: Check for null bytes in the first few KB
+        with open(file_path, "rb") as f:
+            chunk = f.read(4096)  # Read first 4KB
+            if b"\x00" in chunk:
+                return False  # Null bytes are a strong indicator of a binary file
+        return True
+    except Exception:
+        return False
+
+
+# Ensure the project root is in the Python path to allow absolute imports
+# This makes the script runnable as a module.
+_project_root = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), "..", "..", "..")
+)
+if _project_root not in sys.path:
+    sys.path.insert(0, _project_root)
+
+from jarvis.jarvis_platform.base import BasePlatform
+from jarvis.jarvis_platform.registry import PlatformRegistry
+from jarvis.jarvis_rag.llm_interface import LLMInterface
+from jarvis.jarvis_rag.rag_pipeline import JarvisRAGPipeline
+
+app = typer.Typer(
+    name="jarvis-rag",
+    help="A command-line tool to interact with the Jarvis RAG framework.",
+    add_completion=False,
+)
+
+
+class _CustomPlatformLLM(LLMInterface):
+    """A simple wrapper to make a BasePlatform instance compatible with LLMInterface."""
+
+    def __init__(self, platform: BasePlatform):
+        self.platform = platform
+        print(
+            f"✅ 使用自定义LLM: 平台='{platform.platform_name()}', 模型='{platform.name()}'"
+        )
+
+    def generate(self, prompt: str, **kwargs) -> str:
+        return self.platform.chat_until_success(prompt)
+
+
+def _create_custom_llm(platform_name: str, model_name: str) -> Optional[LLMInterface]:
+    """Creates an LLM interface from a specific platform and model."""
+    if not platform_name or not model_name:
+        return None
+    try:
+        registry = PlatformRegistry.get_global_platform_registry()
+        platform_instance = registry.create_platform(platform_name)
+        if not platform_instance:
+            print(f"❌ 错误: 平台 '{platform_name}' 未找到。")
+            return None
+        platform_instance.set_model_name(model_name)
+        platform_instance.set_suppress_output(True)
+        return _CustomPlatformLLM(platform_instance)
+    except Exception as e:
+        print(f"❌ 创建自定义LLM时出错: {e}")
+        return None
+
+
+@app.command(
+    "add",
+    help="Add documents from files, directories, or glob patterns (e.g., 'src/**/*.py').",
+)
+def add_documents(
+    paths: List[Path] = typer.Argument(
+        ...,
+        help="File/directory paths or glob patterns. Shell expansion is supported.",
+    ),
+    collection_name: str = typer.Option(
+        "jarvis_rag_collection",
+        "--collection",
+        "-c",
+        help="Name of the collection in the vector database.",
+    ),
+    embedding_mode: Optional[str] = typer.Option(
+        None,
+        "--embedding-mode",
+        "-e",
+        help="Embedding mode ('performance' or 'accuracy'). Overrides global config.",
+    ),
+    db_path: Optional[Path] = typer.Option(
+        None, "--db-path", help="Path to the vector database. Overrides global config."
+    ),
+):
+    """Adds documents to the RAG knowledge base from various sources."""
+    files_to_process = set()
+
+    for path_str in paths:
+        # Typer with List[Path] might not expand globs, so we do it manually
+        from glob import glob
+
+        expanded_paths = glob(str(path_str), recursive=True)
+
+        for p_str in expanded_paths:
+            path = Path(p_str)
+            if not path.exists():
+                continue
+
+            if path.is_dir():
+                print(f"🔍 正在扫描目录: {path}")
+                for item in path.rglob("*"):
+                    if item.is_file() and is_likely_text_file(item):
+                        files_to_process.add(item)
+            elif path.is_file():
+                if is_likely_text_file(path):
+                    files_to_process.add(path)
+                else:
+                    print(f"⚠️ 跳过可能的二进制文件: {path}")
+
+    if not files_to_process:
+        print(f"⚠️ 在指定路径中未找到任何文本文件。")
+        return
+
+    print(f"✅ 发现 {len(files_to_process)} 个独立文件待处理。")
+
+    try:
+        pipeline = JarvisRAGPipeline(
+            embedding_mode=cast(
+                Optional[Literal["performance", "accuracy"]], embedding_mode
+            ),
+            db_path=str(db_path) if db_path else None,
+            collection_name=collection_name,
+        )
+
+        docs: List[Document] = []
+        loader: BaseLoader
+        for file_path in sorted(list(files_to_process)):
+            try:
+                if file_path.suffix.lower() == ".md":
+                    loader = UnstructuredMarkdownLoader(str(file_path))
+                else:  # Default to TextLoader for .txt and all code files
+                    loader = TextLoader(str(file_path), encoding="utf-8")
+
+                docs.extend(loader.load())
+                print(f"✅ 已加载: {file_path}")
+            except Exception as e:
+                print(f"⚠️ 加载失败 {file_path}: {e}")
+
+        if not docs:
+            print("❌ 未能成功加载任何文档。")
+            raise typer.Exit(code=1)
+
+        pipeline.add_documents(docs)
+        print(f"✅ 成功将 {len(docs)} 个文档的内容添加至集合 '{collection_name}'。")
+
+    except Exception as e:
+        print(f"❌ 发生严重错误: {e}")
+        raise typer.Exit(code=1)
+
+
+@app.command("list-docs", help="List all unique documents in the knowledge base.")
+def list_documents(
+    collection_name: str = typer.Option(
+        "jarvis_rag_collection",
+        "--collection",
+        "-c",
+        help="Name of the collection in the vector database.",
+    ),
+    db_path: Optional[Path] = typer.Option(
+        None, "--db-path", help="Path to the vector database. Overrides global config."
+    ),
+):
+    """Lists all unique documents in the specified collection."""
+    try:
+        pipeline = JarvisRAGPipeline(
+            db_path=str(db_path) if db_path else None,
+            collection_name=collection_name,
+        )
+
+        collection = pipeline.retriever.collection
+        results = collection.get()  # Get all items in the collection
+
+        if not results or not results["metadatas"]:
+            print("ℹ️ 知识库中没有找到任何文档。")
+            return
+
+        # Extract unique source file paths from metadata
+        sources = set()
+        for metadata in results["metadatas"]:
+            if metadata:
+                source = metadata.get("source")
+                if isinstance(source, str):
+                    sources.add(source)
+
+        if not sources:
+            print("ℹ️ 知识库中没有找到任何带有源信息的文档。")
+            return
+
+        print(f"📚 知识库 '{collection_name}' 中共有 {len(sources)} 个独立文档:")
+        for i, source in enumerate(sorted(list(sources)), 1):
+            print(f"  {i}. {source}")
+
+    except Exception as e:
+        print(f"❌ 发生错误: {e}")
+        raise typer.Exit(code=1)
+
+
+@app.command("query", help="Ask a question to the knowledge base.")
+def query(
+    question: str = typer.Argument(..., help="The question to ask."),
+    collection_name: str = typer.Option(
+        "jarvis_rag_collection",
+        "--collection",
+        "-c",
+        help="Name of the collection in the vector database.",
+    ),
+    embedding_mode: Optional[str] = typer.Option(
+        None,
+        "--embedding-mode",
+        "-e",
+        help="Embedding mode ('performance' or 'accuracy'). Overrides global config.",
+    ),
+    db_path: Optional[Path] = typer.Option(
+        None, "--db-path", help="Path to the vector database. Overrides global config."
+    ),
+    platform: Optional[str] = typer.Option(
+        None,
+        "--platform",
+        "-p",
+        help="Specify a platform name for the LLM. Overrides the default thinking model.",
+    ),
+    model: Optional[str] = typer.Option(
+        None,
+        "--model",
+        "-m",
+        help="Specify a model name for the LLM. Requires --platform.",
+    ),
+):
+    """Queries the RAG knowledge base and prints the answer."""
+    if model and not platform:
+        print("❌ 错误: --model 需要指定 --platform。")
+        raise typer.Exit(code=1)
+
+    try:
+        custom_llm = _create_custom_llm(platform, model) if platform and model else None
+        if (platform or model) and not custom_llm:
+            raise typer.Exit(code=1)
+
+        pipeline = JarvisRAGPipeline(
+            llm=custom_llm,
+            embedding_mode=cast(
+                Optional[Literal["performance", "accuracy"]], embedding_mode
+            ),
+            db_path=str(db_path) if db_path else None,
+            collection_name=collection_name,
+        )
+
+        print(f"🤔 正在查询: '{question}'")
+        answer = pipeline.query(question)
+
+        print("💬 答案:")
+        # We can still use rich.markdown.Markdown as PrettyOutput uses rich underneath
+        from jarvis.jarvis_utils.globals import console
+
+        console.print(Markdown(answer))
+
+    except Exception as e:
+        print(f"❌ 发生错误: {e}")
+        raise typer.Exit(code=1)
+
+
+def main():
+    init_env(welcome_str="Jarvis RAG")
+    app()
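Together with the new line in `entry_points.txt`, this module appears to back a new console command. A hypothetical invocation of the three subcommands follows; the `jarvis-rag` script name is assumed from the Typer app name, and the paths, collection, platform, and model are placeholders.

```bash
jarvis-rag add 'src/**/*.py' docs/ --collection my_project
jarvis-rag list-docs --collection my_project
jarvis-rag query "How is the embedding cache keyed?" --platform <platform> --model <model>
```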