ws-bom-robot-app 0.0.57__py3-none-any.whl → 0.0.59__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ws_bom_robot_app/config.py +3 -0
- ws_bom_robot_app/llm/agent_handler.py +1 -2
- ws_bom_robot_app/llm/providers/llm_manager.py +1 -1
- ws_bom_robot_app/llm/tools/models/main.py +4 -0
- ws_bom_robot_app/llm/tools/tool_manager.py +89 -35
- ws_bom_robot_app/llm/tools/utils.py +16 -0
- {ws_bom_robot_app-0.0.57.dist-info → ws_bom_robot_app-0.0.59.dist-info}/METADATA +9 -7
- {ws_bom_robot_app-0.0.57.dist-info → ws_bom_robot_app-0.0.59.dist-info}/RECORD +10 -10
- {ws_bom_robot_app-0.0.57.dist-info → ws_bom_robot_app-0.0.59.dist-info}/WHEEL +0 -0
- {ws_bom_robot_app-0.0.57.dist-info → ws_bom_robot_app-0.0.59.dist-info}/top_level.txt +0 -0
ws_bom_robot_app/config.py
CHANGED
@@ -28,6 +28,7 @@ class Settings(BaseSettings):
   GOOGLE_API_KEY: str = ''
   NEBULY_API_URL: str =''
   GOOGLE_APPLICATION_CREDENTIALS: str = '' # path to google credentials iam file, e.d. ./.secrets/google-credentials.json
+  TAVILY_API_KEY: str = '' #TODO DELETE
   model_config = ConfigDict(
     env_file='./.env',
     extra='ignore',
@@ -43,6 +44,8 @@ class Settings(BaseSettings):
     os.environ["GOOGLE_API_KEY"] = self.GOOGLE_API_KEY
     os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = self.GOOGLE_APPLICATION_CREDENTIALS
     os.environ["NEBULY_API_URL"] = self.NEBULY_API_URL
+    # TODO DELETE
+    os.environ["TAVILY_API_KEY"] = self.TAVILY_API_KEY

 class RuntimeOptions(BaseModel):
   @staticmethod
ws_bom_robot_app/llm/agent_handler.py
CHANGED
@@ -41,7 +41,6 @@ class AgentHandler(AsyncCallbackHandler):
     self.json_start_regex = re.compile(r'(`{1,3}\s*json\b)') # detect a potential json start fence.
     self.json_end_regex = re.compile(r'(`{1,3})') # an end fence (one to three backticks).
     self.stream_cut_last_output_chunk_size = 16 # safe cut last chunk size to output if no markers are found
-
   async def on_chat_model_start(self, serialized, messages, *, run_id, parent_run_id = None, tags = None, metadata = None, **kwargs):
     if not self.__started:
       self.__started = True
@@ -102,7 +101,7 @@ class AgentHandler(AsyncCallbackHandler):
       finally:
         self.json_buffer = ""
       # remove the end fence from pending.
-      self.stream_buffer = self.stream_buffer[end_match.end():].
+      self.stream_buffer = self.stream_buffer[end_match.end():].strip()
       self.in_json_block = False
     else:
       # no end marker found
ws_bom_robot_app/llm/providers/llm_manager.py
CHANGED
@@ -48,7 +48,7 @@ class OpenAI(LlmInterface):
       api_key=self.config.api_key or os.getenv("OPENAI_API_KEY"),
       model=self.config.model,
       stream_usage=True)
-    if not any(self.config.model.startswith(prefix) for prefix in ["o1", "o3"]):
+    if not (any(self.config.model.startswith(prefix) for prefix in ["o1", "o3"]) or "search" in self.config.model):
       chat.temperature = self.config.temperature
       chat.streaming = True
     return chat
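For readers tracking the behavior change above: the temperature/streaming overrides are now skipped both for o1/o3 reasoning models and for any model whose name contains "search". A minimal standalone sketch of that predicate follows; supports_overrides and the sample model names are illustrative, not part of the package.

def supports_overrides(model: str) -> bool:
    # mirrors the updated guard: skip o1/o3 reasoning models and *search* models
    return not (any(model.startswith(prefix) for prefix in ["o1", "o3"]) or "search" in model)

print(supports_overrides("gpt-4o-mini"))            # True  -> temperature/streaming applied
print(supports_overrides("o3-mini"))                # False -> left at provider defaults
print(supports_overrides("gpt-4o-search-preview"))  # False -> left at provider defaults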
ws_bom_robot_app/llm/tools/models/main.py
CHANGED
@@ -7,3 +7,7 @@ class DocumentRetrieverInput(BaseModel):
 class ImageGeneratorInput(BaseModel):
   query: str = Field(description="description of the image to generate.")
   language: str = Field(description="Language of the query. Default is 'it'", default="it")
+class LlmChainInput(BaseModel):
+  input: str = Field(description="Input to the LLM chain")
+class SearchOnlineInput(BaseModel):
+  query: str = Field(description="The search query string")
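These Field descriptions presumably serve as the argument schema the new tools expose. A quick sketch of how such a model validates input and reports its schema (pydantic v2 is already a dependency); the class body mirrors SearchOnlineInput above and the sample query is invented.

from pydantic import BaseModel, Field

class SearchOnlineInput(BaseModel):
    query: str = Field(description="The search query string")

args = SearchOnlineInput(query="latest langchain release notes")
print(args.model_dump())                      # {'query': 'latest langchain release notes'}
print(SearchOnlineInput.model_json_schema())  # JSON schema, including the field description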
ws_bom_robot_app/llm/tools/tool_manager.py
CHANGED
@@ -4,7 +4,7 @@ from ws_bom_robot_app.llm.models.api import LlmAppTool
 from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
 from ws_bom_robot_app.llm.vector_store.db.manager import VectorDbManager
 from ws_bom_robot_app.llm.tools.utils import getRandomWaitingMessage, translate_text
-from ws_bom_robot_app.llm.tools.models.main import NoopInput,DocumentRetrieverInput,ImageGeneratorInput
+from ws_bom_robot_app.llm.tools.models.main import NoopInput,DocumentRetrieverInput,ImageGeneratorInput,LlmChainInput,SearchOnlineInput
 from pydantic import BaseModel, ConfigDict

 class ToolConfig(BaseModel):
@@ -41,6 +41,39 @@ class ToolManager:
     self.callbacks = callbacks
     self.queue = queue

+  async def __extract_documents(self, query: str, app_tool: LlmAppTool):
+    search_type = "similarity"
+    search_kwargs = {"k": 4}
+    if app_tool.search_settings:
+      search_settings = app_tool.search_settings # type: ignore
+      if search_settings.search_type == "similarityScoreThreshold":
+        search_type = "similarity_score_threshold"
+        search_kwargs = {
+          "score_threshold": search_settings.score_threshold_id if search_settings.score_threshold_id else 0.5,
+          "k": search_settings.search_k if search_settings.search_k else 100
+        }
+      elif search_settings.search_type == "mmr":
+        search_type = "mmr"
+        search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
+      elif search_settings.search_type == "default":
+        search_type = "similarity"
+        search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
+      else:
+        search_type = "mixed"
+        search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
+    if self.queue:
+      await self.queue.put(getRandomWaitingMessage(app_tool.waiting_message, traduction=False))
+
+    return await VectorDbManager.get_strategy(app_tool.vector_type).invoke(
+      self.llm.get_embeddings(),
+      app_tool.vector_db,
+      query,
+      search_type,
+      search_kwargs,
+      app_tool=app_tool,
+      llm=self.llm.get_llm(),
+      source=app_tool.function_id,
+    )

   #region functions
   async def document_retriever(self, query: str) -> list:
@@ -65,40 +98,7 @@ class ToolManager:
       self.app_tool.type == "function" and self.app_tool.vector_db
       #and self.settings.get("dataSource") == "knowledgebase"
     ):
-
-      search_kwargs = {"k": 4}
-      if self.app_tool.search_settings:
-        search_settings = self.app_tool.search_settings # type: ignore
-        if search_settings.search_type == "similarityScoreThreshold":
-          search_type = "similarity_score_threshold"
-          search_kwargs = {
-            "score_threshold": search_settings.score_threshold_id if search_settings.score_threshold_id else 0.5,
-            "k": search_settings.search_k if search_settings.search_k else 100
-          }
-        elif search_settings.search_type == "mmr":
-          search_type = "mmr"
-          search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
-        elif search_settings.search_type == "default":
-          search_type = "similarity"
-          search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
-        else:
-          search_type = "mixed"
-          search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
-      if self.queue:
-        await self.queue.put(getRandomWaitingMessage(self.app_tool.waiting_message, traduction=False))
-
-      return await VectorDbManager.get_strategy(self.app_tool.vector_type).invoke(
-        self.llm.get_embeddings(),
-        self.app_tool.vector_db,
-        query,
-        search_type,
-        search_kwargs,
-        app_tool=self.app_tool,
-        llm=self.llm.get_llm(),
-        source=self.app_tool.function_id,
-      )
-    return []
-    #raise ValueError(f"Invalid configuration for {self.settings.name} tool of type {self.settings.type}. Must be a function or vector db not found.")
+      return await self.__extract_documents(query, self.app_tool)

   async def image_generator(self, query: str, language: str = "it"):
     """
@@ -120,12 +120,66 @@ class ToolManager:
     except Exception as e:
       return f"Error: {str(e)}"

+  async def llm_chain(self, input: str):
+    if self.app_tool.type == "llmChain":
+      from langchain_core.prompts import ChatPromptTemplate
+      from langchain_core.output_parsers import StrOutputParser
+      system_message = self.app_tool.llm_chain_settings.prompt
+      context = []
+      if self.app_tool.data_source == "knowledgebase":
+        context = await self.__extract_documents(input, self.app_tool)
+      if len(context) > 0:
+        for doc in context:
+          system_message += f"\n\nContext:\n{doc.metadata.get("source", "")}: {doc.page_content}"
+      prompt = ChatPromptTemplate.from_messages(
+        [ ("system", system_message),
+          ("user", "{input}")],
+      )
+      model = self.app_tool.llm_chain_settings.model
+      self.llm.config.model = model
+      llm = self.llm.get_llm()
+      chain = prompt | llm | StrOutputParser()
+      result = await chain.ainvoke({"input": input})
+      return result
+
+
+  async def search_online(self, query: str):
+    from ws_bom_robot_app.llm.tools.utils import fetch_page, extract_content_with_trafilatura
+    from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
+    import aiohttp, asyncio, ast
+    # DuckDuckGo wrapper
+    search = DuckDuckGoSearchAPIWrapper(max_results=10)
+    try:
+      raw_results = search.results(query, max_results=10)
+    except Exception as e:
+      print(f"[!] Search error: {e}")
+    urls = [r["link"] for r in raw_results]
+    async with aiohttp.ClientSession() as session:
+      tasks = [fetch_page(session, url) for url in urls]
+      responses = await asyncio.gather(*tasks)
+    final_results = []
+    for item in responses:
+      url = item["url"]
+      html = item["html"]
+      if html:
+        content = await extract_content_with_trafilatura(html)
+        if content:
+          final_results.append({"url": url, "content": content})
+        else:
+          final_results.append({"url": url, "content": "No content found"})
+      else:
+        final_results.append({"url": url, "content": "Page not found"})
+    return final_results
+
+
   #endregion

   #class variables (static)
   _list: dict[str,ToolConfig] = {
     "document_retriever": ToolConfig(function=document_retriever, model=DocumentRetrieverInput),
     "image_generator": ToolConfig(function=image_generator, model=ImageGeneratorInput),
+    "llm_chain": ToolConfig(function=llm_chain, model=LlmChainInput),
+    "search_online": ToolConfig(function=search_online, model=SearchOnlineInput),
   }

   #instance methods
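The new llm_chain tool is a plain LCEL composition (prompt | llm | StrOutputParser) invoked asynchronously. A self-contained sketch of that pattern, with ChatOpenAI standing in for whatever self.llm.get_llm() returns; the model name and prompt text are placeholders, not values from the package.

import asyncio
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

async def main() -> None:
    # system + user messages piped through a chat model and a string parser
    prompt = ChatPromptTemplate.from_messages(
        [("system", "Answer using only the provided context."),
         ("user", "{input}")]
    )
    llm = ChatOpenAI(model="gpt-4o-mini")  # requires OPENAI_API_KEY in the environment
    chain = prompt | llm | StrOutputParser()
    print(await chain.ainvoke({"input": "Summarize the context in one sentence."}))

if __name__ == "__main__":
    asyncio.run(main())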
ws_bom_robot_app/llm/tools/utils.py
CHANGED
@@ -23,3 +23,19 @@ async def translate_text(llm: LlmInterface, language, text: str, callbacks: list
   prompt = PromptTemplate.from_template(sys_message)
   chain = prompt | llm.get_llm()
   await chain.ainvoke({"language":language, "testo_da_tradurre": text}, {"callbacks": callbacks})
+
+async def fetch_page(session, url):
+  try:
+    async with session.get(url, timeout=10, ssl=False) as response:
+      if response.status == 200:
+        text = await response.text()
+        return {"url": url, "html": text}
+      else:
+        return {"url": url, "html": None}
+  except Exception as e:
+    return {"url": url, "html": None}
+
+async def extract_content_with_trafilatura(html):
+  """Extracts only the main text using trafilatura"""
+  import trafilatura
+  return trafilatura.extract(html)
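A hedged usage sketch for the two new helpers: fetch one page with aiohttp and pass the HTML through trafilatura. The URL is an arbitrary example, and trafilatura.extract may return None for pages it cannot parse.

import asyncio
import aiohttp
from ws_bom_robot_app.llm.tools.utils import fetch_page, extract_content_with_trafilatura

async def main() -> None:
    async with aiohttp.ClientSession() as session:
        page = await fetch_page(session, "https://example.com")  # {"url": ..., "html": str or None}
    if page["html"]:
        print(await extract_content_with_trafilatura(page["html"]))
    else:
        print("Page not found")

if __name__ == "__main__":
    asyncio.run(main())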
{ws_bom_robot_app-0.0.57.dist-info → ws_bom_robot_app-0.0.59.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ws_bom_robot_app
-Version: 0.0.57
+Version: 0.0.59
 Summary: A FastAPI application serving ws bom/robot/llm platform ai.
 Home-page: https://github.com/websolutespa/bom
 Author: Websolute Spa
@@ -17,15 +17,16 @@ Requires-Dist: pydantic==2.10.6
 Requires-Dist: pydantic-settings==2.7.1
 Requires-Dist: fastapi[standard]==0.115.8
 Requires-Dist: chevron==0.14.0
-Requires-Dist:
-Requires-Dist: langchain
-Requires-Dist: langchain-
-Requires-Dist: langchain-
+Requires-Dist: trafilatura==2.0.0
+Requires-Dist: langchain==0.3.25
+Requires-Dist: langchain-community==0.3.24
+Requires-Dist: langchain-core==0.3.59
+Requires-Dist: langchain-openai==0.3.16
 Requires-Dist: langchain-anthropic==0.3.6
 Requires-Dist: langchain-google-genai==2.0.7
 Requires-Dist: langchain-google-vertexai==2.0.13
-Requires-Dist: langchain-groq==0.2
-Requires-Dist: langchain-ollama==0.2
+Requires-Dist: langchain-groq==0.3.2
+Requires-Dist: langchain-ollama==0.3.2
 Requires-Dist: faiss-cpu==1.9.0
 Requires-Dist: chromadb==0.6.3
 Requires-Dist: langchain_chroma==0.2.1
@@ -48,6 +49,7 @@ Requires-Dist: unstructured-ingest[sharepoint]
 Requires-Dist: unstructured-ingest[slack]
 Requires-Dist: html5lib==1.1
 Requires-Dist: markdownify==0.14.1
+Requires-Dist: duckduckgo-search==8.0.4
 Dynamic: author
 Dynamic: author-email
 Dynamic: classifier
{ws_bom_robot_app-0.0.57.dist-info → ws_bom_robot_app-0.0.59.dist-info}/RECORD
CHANGED
@@ -1,6 +1,6 @@
 ws_bom_robot_app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ws_bom_robot_app/auth.py,sha256=84nIbmJsMrNs0sxIQGEHbjsjc2P6ZrZZGSn8dkiL6is,895
-ws_bom_robot_app/config.py,sha256=
+ws_bom_robot_app/config.py,sha256=9W3cz92hbogDrtbCcybpCY29zCka7G225CNQEptpx30,4183
 ws_bom_robot_app/cron_manager.py,sha256=0Yt5AMTPGlXZ_M5ck0SKMX8wvzoPsseEezg_s0Q3HKY,9224
 ws_bom_robot_app/main.py,sha256=zO3B-v-v9ESASvw8IaQj9Y9hNvNmOxohFmA0R82EybQ,6518
 ws_bom_robot_app/task_manager.py,sha256=Zedzs2R3O-wNSQOqs4jorgFwPRi-ji_0TN4mGfk-VvE,15958
@@ -8,7 +8,7 @@ ws_bom_robot_app/util.py,sha256=b49ItlZgh2Wzw-6K8k5Wa44eVgjQ0JmWQwJnEaQBVGw,3502
 ws_bom_robot_app/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ws_bom_robot_app/llm/agent_context.py,sha256=uatHJ8wcRly6h0S762BgfzDMpmcwCHwNzwo37aWjeE0,1305
 ws_bom_robot_app/llm/agent_description.py,sha256=5IP0qFSJvaE3zjGS7f0W1DuiegP0RHXRMBoDC5pCofA,4779
-ws_bom_robot_app/llm/agent_handler.py,sha256=
+ws_bom_robot_app/llm/agent_handler.py,sha256=4HYP8wbdtJhRi3bk6PvJ3cRDZyLYWt3Ow5tnHpkEg1o,7738
 ws_bom_robot_app/llm/agent_lcel.py,sha256=8d10b43BXqE4rfXE5uh8YGT67o1bw0q0l7QXFT6wPKA,2320
 ws_bom_robot_app/llm/api.py,sha256=1nzQ7g2n_DlX6Ixo5ecS10UvyyKJ42qZQ6aD8-EI7BE,4709
 ws_bom_robot_app/llm/defaut_prompt.py,sha256=D9dn8yPveu0bVwGM1wQWLYftmBs5O76o0R_caLLll8w,1121
@@ -23,13 +23,13 @@ ws_bom_robot_app/llm/models/base.py,sha256=1TqxuTK3rjJEALn7lvgoen_1ba3R2brAgGx6E
 ws_bom_robot_app/llm/models/feedback.py,sha256=pYNQGxNOBgeAAfdJLI95l7ePLBI5tVdsgnyjp5oMOQU,1722
 ws_bom_robot_app/llm/models/kb.py,sha256=oVSw6_dmNxikAHrPqcfxDXz9M0ezLIYuxpgvzfs_Now,9514
 ws_bom_robot_app/llm/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ws_bom_robot_app/llm/providers/llm_manager.py,sha256=
+ws_bom_robot_app/llm/providers/llm_manager.py,sha256=zIkxgTLYQCcup2Ixf4eWap4mNinuJH2YmkjLjZGDyJM,8371
 ws_bom_robot_app/llm/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ws_bom_robot_app/llm/tools/tool_builder.py,sha256=p_Q32_-OSydcxzj69PgPIuiny816zYv5dVsCHSY0ELc,1188
-ws_bom_robot_app/llm/tools/tool_manager.py,sha256=
-ws_bom_robot_app/llm/tools/utils.py,sha256=
+ws_bom_robot_app/llm/tools/tool_manager.py,sha256=adWvaSIur5Ez2gGsuTFqNprZZlQP6ZZj5WknzJjtQ0c,8355
+ws_bom_robot_app/llm/tools/utils.py,sha256=Ba7ScFZPVJ3ke8KLO8ik1wyR2f_zC99Bikqx0OGnKoI,1924
 ws_bom_robot_app/llm/tools/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ws_bom_robot_app/llm/tools/models/main.py,sha256=
+ws_bom_robot_app/llm/tools/models/main.py,sha256=pBQNWPd1OZgZ2xkOnUOawNbujQ5oJXLdyuAex1afLWc,579
 ws_bom_robot_app/llm/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ws_bom_robot_app/llm/utils/agent.py,sha256=ISF9faaD5tBi-8sbgQpgfqWT1JIVcgv_lRhyaNAkI2Q,1445
 ws_bom_robot_app/llm/utils/chunker.py,sha256=N7570xBYlObneg-fsvDhPAJ-Pv8C8OaYZOBK6q7LmMI,607
@@ -65,7 +65,7 @@ ws_bom_robot_app/llm/vector_store/loader/__init__.py,sha256=47DEQpj8HBSa-_TImW-5
 ws_bom_robot_app/llm/vector_store/loader/base.py,sha256=L_ugekNuAq0N9O-24wtlHSNHkqSeD-KsJrfGt_FX9Oc,5340
 ws_bom_robot_app/llm/vector_store/loader/docling.py,sha256=yP0zgXLeFAlByaYuj-6cYariuknckrFds0dxdRcnVz8,3456
 ws_bom_robot_app/llm/vector_store/loader/json_loader.py,sha256=LDppW0ZATo4_1hh-KlsAM3TLawBvwBxva_a7k5Oz1sc,858
-ws_bom_robot_app-0.0.
-ws_bom_robot_app-0.0.
-ws_bom_robot_app-0.0.
-ws_bom_robot_app-0.0.
+ws_bom_robot_app-0.0.59.dist-info/METADATA,sha256=hTSpZWSUDrn0IY9jsYvUCGocnzK9kbrEiRINEwiDVe0,8406
+ws_bom_robot_app-0.0.59.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ws_bom_robot_app-0.0.59.dist-info/top_level.txt,sha256=Yl0akyHVbynsBX_N7wx3H3ZTkcMLjYyLJs5zBMDAKcM,17
+ws_bom_robot_app-0.0.59.dist-info/RECORD,,
{ws_bom_robot_app-0.0.57.dist-info → ws_bom_robot_app-0.0.59.dist-info}/WHEEL
File without changes
{ws_bom_robot_app-0.0.57.dist-info → ws_bom_robot_app-0.0.59.dist-info}/top_level.txt
File without changes