ws-bom-robot-app 0.0.41__py3-none-any.whl → 0.0.43__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -21,6 +21,7 @@ class Settings(BaseSettings):
  robot_cms_db_folder: str = 'llmVectorDb'
  robot_cms_kb_folder: str ='llmKbFile'
  ANTHROPIC_API_KEY: str = ''
+ DEEPSEEK_API_KEY: str = ''
  OPENAI_API_KEY: str = '' # used also for saas dall-e api
  OLLAMA_API_URL: str = 'http://localhost:11434'
  GROQ_API_KEY: str = ''
@@ -36,6 +37,7 @@ class Settings(BaseSettings):
  os.environ["OPENAI_API_KEY"] = self.OPENAI_API_KEY
  os.environ["OLLAMA_API_URL"] = self.OLLAMA_API_URL
  os.environ["ANTHROPIC_API_KEY"] = self.ANTHROPIC_API_KEY
+ os.environ["DEEPSEEK_API_KEY"] = self.DEEPSEEK_API_KEY
  os.environ["GROQ_API_KEY"] = self.GROQ_API_KEY
  os.environ["GOOGLE_API_KEY"] = self.GOOGLE_API_KEY
  os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = self.GOOGLE_APPLICATION_CREDENTIALS
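The two hunks above (apparently config.py, per the RECORD below) register the new DEEPSEEK_API_KEY setting and mirror it into the process environment so provider SDKs that read os.environ can pick it up. A minimal sketch of that pattern, assuming pydantic-settings; `export_to_env` is an illustrative name, not the package's:

```python
# Sketch of the Settings pattern above, assuming pydantic-settings;
# field names mirror the diff, the class body here is illustrative only.
import os
from pydantic_settings import BaseSettings

class Settings(BaseSettings):
    ANTHROPIC_API_KEY: str = ''
    DEEPSEEK_API_KEY: str = ''   # new in 0.0.43

    def export_to_env(self) -> None:
        # mirror config values into the process env so integrations
        # that read os.environ see them
        os.environ["ANTHROPIC_API_KEY"] = self.ANTHROPIC_API_KEY
        os.environ["DEEPSEEK_API_KEY"] = self.DEEPSEEK_API_KEY

Settings().export_to_env()
```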
@@ -2,14 +2,14 @@ from asyncio import Queue
  from langchain_core.agents import AgentFinish
  from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
  from langchain.callbacks.base import AsyncCallbackHandler
- from ws_bom_robot_app.llm.utils.print import printJson, printString
+ from ws_bom_robot_app.llm.utils.print import print_json, print_string
  from typing import Any, Dict, List, Optional, Union
  from uuid import UUID
  import ws_bom_robot_app.llm.settings as settings
  from langchain_core.callbacks.base import AsyncCallbackHandler
  from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
  from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
- import json
+ import json, logging, re

  # Here is a custom handler that will print the tokens to stdout.
  # Instead of printing to stdout you can send the data elsewhere; e.g., to a streaming API response
@@ -31,34 +31,25 @@ class AgentHandler(AsyncCallbackHandler):
  def __init__(self, queue: Queue, llm:str, threadId: str = None) -> None:
  super().__init__()
  self._threadId = threadId
- self.json_block = ""
- self.is_json_block = False
- self.backtick_count = 0 # backtick count, for accurate fence checking
  self.queue = queue
  self.llm = llm
-
- async def on_llm_start(
- self,
- serialized: Dict[str, Any],
- prompts: List[str],
- *,
- run_id: UUID,
- parent_run_id: UUID = None,
- tags: List[str] = None,
- metadata: Dict[str, Any] = None,
- **kwargs: Any,
- ) -> None:
- firstChunk = {
- "type": "info",
- "threadId": self._threadId,
- }
- await self.queue.put(printString(firstChunk))
-
- """async def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], *, run_id: UUID = None, parent_run_id = None, tags = None, metadata = None, **kwargs: Any) -> Any:
- pass"""
-
- async def on_tool_end(self, output: Any, *, run_id: UUID, parent_run_id: UUID = None, tags: List[str] = None, **kwargs: Any) -> None:
- pass
+ self.__started: bool = False
+ # on new token event
+ self.stream_buffer = "" # accumulates text that hasn't been processed yet
+ self.in_json_block = False
+ self.json_buffer = ""
+ self.json_start_regex = re.compile(r'(`{1,3}\s*json\b)') # detect a potential json start fence.
+ self.json_end_regex = re.compile(r'(`{1,3})') # an end fence (one to three backticks).
+ self.stream_cut_last_output_chunk_size = 16 # safe size of the last chunk to emit when no markers are found
+
+ async def on_chat_model_start(self, serialized, messages, *, run_id, parent_run_id = None, tags = None, metadata = None, **kwargs):
+ if not self.__started:
+ self.__started = True
+ firstChunk = {
+ "type": "info",
+ "threadId": self._threadId,
+ }
+ await self.queue.put(print_json(firstChunk))

  async def on_llm_new_token(
  self,
@@ -70,27 +61,54 @@ class AgentHandler(AsyncCallbackHandler):
  tags: Optional[List[str]] = None,
  **kwargs: Any,
  ) -> None:
- """Handles new tokens during streaming."""
-
  if token:
  token = _parse_token(self.llm,token)
  if token:
- self.backtick_count += token.count("`")
-
- if self.backtick_count >= 3:
- if not self.is_json_block:
- self.is_json_block = True
- self.json_block = ""
- else:
- self.is_json_block = False
- self.json_block += token.replace("```json", '')
- await self.process_json_block(self.json_block)
- self.json_block = ""
- self.backtick_count = 0
- elif self.is_json_block:
- self.json_block += token
+ self.stream_buffer += token.replace('\n','') # append new data to pending buffer
+ if not self.in_json_block:
+ # search for the start of a json block.
+ start_match = self.json_start_regex.search(self.stream_buffer)
+ if start_match:
+ start_index = start_match.start()
+ # everything before the start marker is normal content.
+ if start_index > 0:
+ _before = self.stream_buffer[:start_index].replace('`','').strip() # strip any preceding backticks.
+ if _before:
+ await self.queue.put(print_string(_before))
+ # remove the start marker from pending.
+ self.stream_buffer = self.stream_buffer[start_match.end():]
+ # switch into json mode.
+ self.in_json_block = True
+ self.json_buffer = ""
+ else:
+ # no json start marker found. It might be because the marker is split between chunks.
+ # to avoid losing potential marker fragments, output what we can safely process:
+ # if the pending text is long, we output most of it except the last few characters.
+ if len(self.stream_buffer) > self.stream_cut_last_output_chunk_size:
+ safe_cut = self.stream_buffer[:-3]
+ await self.queue.put(print_string(safe_cut))
+ self.stream_buffer = self.stream_buffer[-3:]
  else:
- await self.queue.put(printString(token))
+ # in json block: look for an end fence.
+ end_match = self.json_end_regex.search(self.stream_buffer,endpos=3)
+ if end_match:
+ end_index = end_match.start()
+ self.json_buffer += self.stream_buffer[:end_index]
+ try:
+ data = json.loads(self.json_buffer.replace('`',''))
+ await self.queue.put(print_json(data))
+ except json.JSONDecodeError as e:
+ logging.error(f"on_token: invalid json: {e} | {self.json_buffer}")
+ finally:
+ self.json_buffer = ""
+ # remove the end fence from pending.
+ self.stream_buffer = self.stream_buffer[end_match.end():].replace('`','').strip()
+ self.in_json_block = False
+ else:
+ # no end marker found
+ # accumulate everything and break to wait for more data.
+ self.json_buffer += self.stream_buffer
+ self.stream_buffer = ""

  async def on_agent_finish(
  self,
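The rewritten on_llm_new_token replaces the old backtick counter with a buffered scanner: text accumulates in stream_buffer, a held-back tail guards against a ```json fence that is split across tokens, and fenced content is parsed and emitted as JSON. A standalone sketch of the same idea; the FenceSplitter class and feed() API are illustrative, and unlike the handler it scans the whole pending buffer for the end fence:

```python
# Sketch of the fence-splitting idea: buffer incoming chunks, hold back a
# small tail in case a ```json fence is split across chunks, and emit plain
# text vs parsed JSON separately.
import json, re

START = re.compile(r'`{1,3}\s*json\b')
END = re.compile(r'`{1,3}')

class FenceSplitter:
    def __init__(self):
        self.pending, self.json_buf, self.in_json = "", "", False
        self.out = []  # (kind, payload) tuples

    def feed(self, chunk: str) -> None:
        self.pending += chunk
        if not self.in_json:
            m = START.search(self.pending)
            if m:
                before = self.pending[:m.start()].replace('`', '').strip()
                if before:
                    self.out.append(("text", before))
                self.pending, self.in_json = self.pending[m.end():], True
            elif len(self.pending) > 16:
                # flush all but a 3-char tail that might hold a partial fence
                self.out.append(("text", self.pending[:-3]))
                self.pending = self.pending[-3:]
        else:
            m = END.search(self.pending)
            if m:
                self.json_buf += self.pending[:m.start()]
                self.out.append(("json", json.loads(self.json_buf)))
                self.pending, self.json_buf, self.in_json = self.pending[m.end():], "", False
            else:
                self.json_buf += self.pending
                self.pending = ""

s = FenceSplitter()
for tok in ['Hello ', 'world ``', '`json {"a":', ' 1} ``` bye']:
    s.feed(tok)
print(s.out)  # [('text', 'Hello world'), ('json', {'a': 1})] (' bye' stays buffered)
```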
@@ -106,23 +124,21 @@ class AgentHandler(AsyncCallbackHandler):
  AIMessage(content=_parse_token(self.llm,finish.return_values["output"])),
  ]
  )
+ # end-of-stream: flush any remaining text
+ if self.in_json_block:
+ try:
+ data = json.loads(self.json_buffer)
+ await self.queue.put(print_json(data))
+ except json.JSONDecodeError as e :
+ logging.error(f"on_agent_finish: invalid json: {e} | {self.json_buffer}")
+ #await self.queue.put(print_string(self.json_buffer))
+ elif self.stream_buffer:
+ await self.queue.put(print_string(self.stream_buffer))
+
  finalChunk = {"type": "end"}
- await self.queue.put(printJson(finalChunk))
+ await self.queue.put(print_json(finalChunk))
  await self.queue.put(None)

- async def process_json_block(self, json_block: str):
- """Processes the complete JSON block."""
- # Remove the leading '```json' delimiter if present, plus whitespace
- json_block_clean = json_block.replace('```', '').replace('json', '').strip()
- # Check that the block is not empty before attempting to parse it
- if json_block_clean:
- try:
- # Try to parse the JSON
- parsed_json = json.loads(json_block_clean)
- await self.queue.put(printJson(parsed_json))
- except json.JSONDecodeError as e:
- # If the JSON is malformed, log the error
- raise e

  class RawAgentHandler(AsyncCallbackHandler):

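on_agent_finish now flushes whatever is still buffered, emits a final {"type": "end"} chunk, and pushes a None sentinel onto the queue. Downstream, each print_json chunk is a JSON object followed by a comma, so a consumer can rebuild the whole stream; a hedged sketch of such a consumer (drain is an illustrative name, not part of the package):

```python
# Sketch of a queue consumer for the handler output above: each chunk
# print_json() emits is a JSON value followed by a comma, the stream closes
# with {"type": "end"} and a None sentinel.
import asyncio, json

async def drain(queue: asyncio.Queue) -> list:
    raw = ""
    while True:
        chunk = await queue.get()
        if chunk is None:          # sentinel pushed by on_agent_finish
            break
        raw += chunk               # e.g. '{"threadId": ..., "type": "info"},'
    # same trick main.py uses: wrap the comma-separated values in a JSON array
    return json.loads('[' + raw[:-1] + ']')

async def demo():
    q: asyncio.Queue = asyncio.Queue()
    for part in ['{"type": "info"},', '"hello",', '{"type": "end"},', None]:
        await q.put(part)
    print(await drain(q))  # [{'type': 'info'}, 'hello', {'type': 'end'}]

asyncio.run(demo())
```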
@@ -130,24 +146,6 @@ class RawAgentHandler(AsyncCallbackHandler):
  super().__init__()
  self.queue = queue
  self.llm = llm
- async def on_llm_start(
- self,
- serialized: Dict[str, Any],
- prompts: List[str],
- *,
- run_id: UUID,
- parent_run_id: UUID = None,
- tags: List[str] = None,
- metadata: Dict[str, Any] = None,
- **kwargs: Any,
- ) -> None:
- pass
-
- """async def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], *, run_id: UUID = None, parent_run_id = None, tags = None, metadata = None, **kwargs: Any) -> Any:
- pass"""
-
- async def on_tool_end(self, output: Any, *, run_id: UUID, parent_run_id: UUID = None, tags: List[str] = None, **kwargs: Any) -> None:
- pass

  async def on_llm_new_token(
  self,
@@ -160,7 +158,7 @@ class RawAgentHandler(AsyncCallbackHandler):
  **kwargs: Any,
  ) -> None:
  """Handles new tokens during streaming."""
- if token: # Only process non-empty tokens
+ if token: # only process non-empty tokens
  await self.queue.put(_parse_token(self.llm,token))

  async def on_agent_finish(
@@ -20,32 +20,26 @@ class AgentLcel:
  self.__tools = tools
  self.rules = rules
  self.embeddings = llm.get_embeddings()
- self.memory_key = "chat_history"
+ self.memory_key: str = "chat_history"
  self.__llm_with_tools = llm.get_llm().bind_tools(self.__tools) if len(self.__tools) > 0 else llm.get_llm()
  self.executor = self.__create_agent()

  async def __create_prompt(self, input: dict) -> ChatPromptTemplate:
- message : LlmMessage = input["input"]
+ message : LlmMessage = input[self.memory_key][-1]
  input = message.content
  rules_prompt = await get_rules(self.embeddings, self.rules, input) if self.rules else ""
  system = default_prompt + (tool_prompt(render_text_description(self.__tools)) if len(self.__tools)>0 else "") + self.sys_message + rules_prompt
- return ChatPromptTemplate.from_messages(
- [
- (
- "system", system
- ),
+ return ChatPromptTemplate([
+ ("system", system),
  MessagesPlaceholder(variable_name=self.memory_key),
- ("user", "{input}"),
  MessagesPlaceholder(variable_name="agent_scratchpad"),
- ]
- )
+ ])

  def __create_agent(self) -> AgentExecutor:
  agent: Any = (
  {
- "input": lambda x: x["input"],
  "agent_scratchpad": lambda x: self.__llm.get_formatter(x["intermediate_steps"]),
- "chat_history": lambda x: x["chat_history"],
+ str(self.memory_key): lambda x: x[self.memory_key],
  }
  | RunnableLambda(self.__create_prompt)
  | self.__llm_with_tools
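With this change the latest user turn is no longer a separate {input} slot: it travels as the last element of chat_history, and __create_prompt reads it back out with input[self.memory_key][-1]. A minimal sketch of the resulting prompt shape, assuming langchain-core (from_messages is used here for clarity; the diff calls the ChatPromptTemplate constructor directly):

```python
# The last user turn is no longer a separate "{input}" slot; it arrives as the
# final message inside chat_history and the prompt only needs placeholders.
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import HumanMessage, AIMessage

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    MessagesPlaceholder(variable_name="chat_history"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])

history = [HumanMessage(content="hi"), AIMessage(content="hello"),
           HumanMessage(content="what changed in 0.0.43?")]  # last turn included
messages = prompt.invoke({"chat_history": history, "agent_scratchpad": []}).to_messages()
for m in messages:
    print(type(m).__name__, m.content)
```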
@@ -1,5 +1,5 @@
  from asyncio import Queue
- import asyncio, json, logging, os, traceback
+ import asyncio, json, logging, os, traceback, re
  from fastapi import Request
  from langchain.callbacks.tracers import LangChainTracer
  from langchain_core.callbacks.base import AsyncCallbackHandler
@@ -28,6 +28,18 @@ async def invoke(rq: InvokeRequest) -> str:
  result: AIMessage = await processor.run_agent(_msg)
  return {"result": result.content}

+ def _parse_formatted_message(message: str) -> str:
+ try:
+ text_fragments = []
+ quoted_strings = re.findall(r'"([^"\\]*(?:\\.[^"\\]*)*)"', message)
+ for string in quoted_strings:
+ if not string.startswith(('threadId', 'type')) and len(string) > 1:
+ text_fragments.append(string)
+ result = ''.join(text_fragments)
+ result = result.replace('\\n', '\n')
+ except:
+ result = message
+ return result
  async def __stream(rq: StreamRequest, ctx: Request, queue: Queue,formatted: bool = True) -> None:
  await rq.initialize()
  #os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
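_parse_formatted_message is a best-effort fallback for stored assistant turns that are no longer valid JSON: it pulls every quoted string, drops fragments that look like threadId/type keys, and restores escaped newlines. A quick check with an illustrative truncated stream:

```python
# Demo of the _parse_formatted_message fallback above; the sample payload is
# illustrative (a stream fragment whose trailing comma was lost, so the
# '[...]' wrap used elsewhere would fail to parse).
import re

message = '"Hello ","world\\n"'
fragments = re.findall(r'"([^"\\]*(?:\\.[^"\\]*)*)"', message)
text = ''.join(s for s in fragments
               if not s.startswith(('threadId', 'type')) and len(s) > 1)
print(text.replace('\\n', '\n'))  # -> Hello world (plus a trailing newline)
```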
@@ -41,21 +53,33 @@ async def __stream(rq: StreamRequest, ctx: Request, queue: Queue,formatted: bool

  #CREATION OF CHAT HISTORY FOR AGENT
  for message in rq.messages:
- if message.role == "user":
- settings.chat_history.append(HumanMessage(content=message.content))
- elif message.role == "assistant":
- message_content = ""
- if formatted and '{\"type\":\"text\"' in message.content:
- try:
- json_msg = json.loads('[' + message.content[:-1] + ']')
- for msg in json_msg:
- if msg.get("content"):
- message_content += msg["content"]
- except:
- message_content = message.content
- else:
- message_content = message.content
- settings.chat_history.append(AIMessage(content=message_content))
+ if message.role in ["human","user"]:
+ settings.chat_history.append(HumanMessage(content=message.content))
+ elif message.role in ["ai","assistant"]:
+ message_content = ""
+ if formatted:
+ if '{\"type\":\"string\"' in message.content:
+ try:
+ json_msg = json.loads('[' + message.content[:-1] + ']')
+ for msg in json_msg:
+ if msg.get("content"):
+ message_content += msg["content"]
+ except:
+ message_content = _parse_formatted_message(message.content)
+ elif '{\"type\":\"text\"' in message.content:
+ try:
+ json_msg = json.loads('[' + message.content[:-1] + ']')
+ for msg in json_msg:
+ if msg.get("text"):
+ message_content += msg["text"]
+ except:
+ message_content = _parse_formatted_message(message.content)
+ else:
+ message_content = _parse_formatted_message(message.content)
+ else:
+ message_content = message.content
+ if message_content:
+ settings.chat_history.append(AIMessage(content=message_content))

  if rq.lang_chain_tracing:
  client = LangSmithClient(
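The happy path of the history rebuild relies on the stream envelope: a stored assistant turn is a comma-terminated run of JSON objects, so wrapping it in brackets (minus the trailing comma) yields a parseable array. A sketch with an illustrative payload:

```python
# The '[' + content[:-1] + ']' trick from the branch above.
import json

stored = '{"type":"string","content":"Hello "},{"type":"string","content":"world"},'
parts = json.loads('[' + stored[:-1] + ']')
message_content = ''.join(p["content"] for p in parts if p.get("content"))
print(message_content)  # Hello world
```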
@@ -79,11 +103,9 @@ async def __stream(rq: StreamRequest, ctx: Request, queue: Queue,formatted: bool
  )
  callbacks.append(nebuly_callback)

- #with warnings.catch_warnings():
- # warnings.simplefilter("ignore", UserWarning)
  try:
  await processor.executor.ainvoke(
- {"input": rq.messages[-1], "chat_history": settings.chat_history},
+ {"chat_history": settings.chat_history},
  {"callbacks": callbacks},
  )
  except Exception as e:
@@ -44,7 +44,9 @@ class OpenAI(LlmInterface):

  def get_llm(self):
  from langchain_openai import ChatOpenAI
- chat = ChatOpenAI(api_key=self.config.api_key, model=self.config.model)
+ chat = ChatOpenAI(
+ api_key=self.config.api_key or os.getenv("OPENAI_API_KEY"),
+ model=self.config.model)
  if not any(self.config.model.startswith(prefix) for prefix in ["o1", "o3"]):
  chat.temperature = self.config.temperature
  chat.streaming = True
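This hunk introduces the fallback pattern repeated across the providers below: prefer an explicit per-config key, else the process-wide key that Settings exported to the environment. A sketch (LlmConfig is a stand-in for the package's real config object):

```python
# config.api_key or os.getenv(...): empty string is falsy, so `or` falls
# through to the environment variable.
import os
from dataclasses import dataclass

@dataclass
class LlmConfig:
    api_key: str = ""
    model: str = "gpt-4o-mini"

def resolve_key(config: LlmConfig, env_var: str) -> str | None:
    return config.api_key or os.getenv(env_var)

print(resolve_key(LlmConfig(api_key="sk-explicit"), "OPENAI_API_KEY"))  # sk-explicit
print(resolve_key(LlmConfig(), "OPENAI_API_KEY"))  # env value, or None if unset
```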
@@ -60,9 +62,9 @@ class DeepSeek(LlmInterface):
  def get_llm(self):
  from langchain_openai import ChatOpenAI
  return ChatOpenAI(
- api_key=self.config.api_key,
+ api_key=self.config.api_key or os.getenv("DEEPSEEK_API_KEY"),
  model=self.config.model,
- base_url="https://api.deepseek.com/v1",
+ base_url="https://api.deepseek.com",
  max_tokens=8192,
  temperature=self.config.temperature,
  streaming=True,
@@ -79,7 +81,7 @@ class Google(LlmInterface):
  from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
  return ChatGoogleGenerativeAI(
  name="chat",
- api_key=self.config.api_key,
+ api_key=self.config.api_key or os.getenv("GOOGLE_API_KEY"),
  model=self.config.model,
  temperature=self.config.temperature,
  disable_streaming=False
@@ -89,7 +91,7 @@ class Google(LlmInterface):
  from langchain_google_genai.embeddings import GoogleGenerativeAIEmbeddings
  return GoogleGenerativeAIEmbeddings(
  google_api_key=self.config.api_key,
- model="models/text-embedding-004")
+ model="models/text-embedding-005")

  def get_models(self):
  import google.generativeai as genai
@@ -112,7 +114,7 @@ class Gvertex(LlmInterface):
  )
  def get_embeddings(self):
  from langchain_google_vertexai import VertexAIEmbeddings
- return VertexAIEmbeddings(model_name="text-embedding-004")
+ return VertexAIEmbeddings(model_name="text-embedding-005")
  def get_models(self):
  #from google.cloud import aiplatform
  #aiplatform.init()
@@ -123,7 +125,7 @@ class Gvertex(LlmInterface):
  #see https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#united-states for available models
  return [
  {"id":"gemini-2.0-flash-001"},
- {"id":"gemini-1.5-pro-001"},
+ {"id":"gemini-2.0-flash-lite-001"},
  {"id":"gemini-1.5-pro-002"}
  ]

@@ -131,7 +133,7 @@ class Anthropic(LlmInterface):
  def get_llm(self):
  from langchain_anthropic import ChatAnthropic
  return ChatAnthropic(
- api_key=self.config.api_key,
+ api_key=self.config.api_key or os.getenv("ANTHROPIC_API_KEY"),
  model=self.config.model,
  temperature=self.config.temperature,
  streaming=True,
@@ -156,7 +158,7 @@ class Groq(LlmInterface):
  def get_llm(self):
  from langchain_groq import ChatGroq
  return ChatGroq(
- api_key=self.config.api_key,
+ api_key=self.config.api_key or os.getenv("GROQ_API_KEY"),
  model=self.config.model,
  #max_tokens=8192,
  temperature=self.config.temperature,
@@ -2,10 +2,10 @@ import random, os
  from langchain_openai import ChatOpenAI
  from langchain_core.prompts import PromptTemplate
  from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
- from ws_bom_robot_app.llm.utils.print import printString
+ from ws_bom_robot_app.llm.utils.print import print_string

  def __print_output(data: str) -> str:
- return printString(data) if os.environ.get("AGENT_HANDLER_FORMATTED") == str(True) else f"{data} "
+ return print_string(data) if os.environ.get("AGENT_HANDLER_FORMATTED") == str(True) else f"{data} "

  def getRandomWaitingMessage(waiting_messages: str, traduction: bool = True) -> str:
  if not waiting_messages: return ""
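__print_output switches between the JSON chunk envelope and plain text based on the AGENT_HANDLER_FORMATTED environment variable. A sketch, with print_string stubbed in for illustration:

```python
# Demo of the env-var toggle above; print_string is a stand-in for
# ws_bom_robot_app.llm.utils.print.print_string.
import json, os

def print_string(data: str) -> str:
    return json.dumps(data, sort_keys=True) + ","

def __print_output(data: str) -> str:
    return print_string(data) if os.environ.get("AGENT_HANDLER_FORMATTED") == str(True) else f"{data} "

os.environ["AGENT_HANDLER_FORMATTED"] = str(True)   # str(True) == "True"
print(__print_output("done"))   # "done",
del os.environ["AGENT_HANDLER_FORMATTED"]
print(__print_output("done"))   # done (plain text with a trailing space)
```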
@@ -14,16 +14,16 @@ class HiddenPrints:
  sys.stdout = self._original_stdout
  sys.stderr = self._original_stderr

- def printJson(data) -> str:
- return f"{json.dumps(data, indent=2, sort_keys=True)},"
+ def print_json(data) -> str:
+ return print_single_json(data) + ","

- def printSingleJson(data) -> str:
- return f"{json.dumps(data, indent=2, sort_keys=True)}"
+ def print_single_json(data) -> str:
+ return json.dumps(data, sort_keys=True)

- def printString(data: str) -> str:
+ def print_string(data: str) -> str:
  if data != "":
- return printJson(data)
+ return print_json(data)

- def printSingleString(data: str) -> str:
+ def print_single_string(data: str) -> str:
  if data != "":
- return printSingleJson(data)
+ return print_single_json(data)
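Net behavior of the renamed helpers: print_single_json now emits compact (non-indented) JSON, and print_json merely appends the comma that chains chunks into the stream. As a sketch of the 0.0.43 definitions:

```python
# Behavior of the renamed print helpers above.
import json

def print_single_json(data) -> str:
    return json.dumps(data, sort_keys=True)

def print_json(data) -> str:
    return print_single_json(data) + ","

print(print_single_json({"type": "end"}))  # {"type": "end"}
print(print_json({"type": "end"}))         # {"type": "end"},
```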
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: ws_bom_robot_app
- Version: 0.0.41
+ Version: 0.0.43
  Summary: A FastAPI application serving ws bom/robot/llm platform ai.
  Home-page: https://github.com/websolutespa/bom
  Author: Websolute Spa
@@ -1,28 +1,28 @@
  ws_bom_robot_app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ws_bom_robot_app/auth.py,sha256=84nIbmJsMrNs0sxIQGEHbjsjc2P6ZrZZGSn8dkiL6is,895
- ws_bom_robot_app/config.py,sha256=6Rz6-KGYMloekkLL9DUsyPAbdtN5iSTiPzRCuhymJFI,3872
+ ws_bom_robot_app/config.py,sha256=RBdpzFMKUiEgVU1omxQhcYxjJVQVOeX9vR2xUlrLd8s,3968
  ws_bom_robot_app/cron_manager.py,sha256=0Yt5AMTPGlXZ_M5ck0SKMX8wvzoPsseEezg_s0Q3HKY,9224
  ws_bom_robot_app/main.py,sha256=zO3B-v-v9ESASvw8IaQj9Y9hNvNmOxohFmA0R82EybQ,6518
  ws_bom_robot_app/task_manager.py,sha256=Zedzs2R3O-wNSQOqs4jorgFwPRi-ji_0TN4mGfk-VvE,15958
  ws_bom_robot_app/util.py,sha256=b49ItlZgh2Wzw-6K8k5Wa44eVgjQ0JmWQwJnEaQBVGw,3502
  ws_bom_robot_app/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ws_bom_robot_app/llm/agent_description.py,sha256=DKPGchYXrkxt8pRpOabn5QE_qn8jSKl92pLpnVLHG5I,4657
- ws_bom_robot_app/llm/agent_handler.py,sha256=4zdpSf5iVLxMZ90c_vUl_k-O9SF6u_h7GOB24y4mhIo,6435
- ws_bom_robot_app/llm/agent_lcel.py,sha256=Yt-hbKarLktv5BBiTp9OHYRdwOTRu5ogyTrYapFdiTU,2389
+ ws_bom_robot_app/llm/agent_handler.py,sha256=37SGUvjV_u6k-ufetulZlMGKksDc4ItoIdz-nwDLIBw,7604
+ ws_bom_robot_app/llm/agent_lcel.py,sha256=iF03Q2fiJ60Zv-ia0eYwz46IPMTJaijb6xcn6cJIUZc,2260
  ws_bom_robot_app/llm/api.py,sha256=UaD1oJyAOe7ASoXxPNJcth3kDuWcjk1xqUNEjuPWbR4,3759
  ws_bom_robot_app/llm/defaut_prompt.py,sha256=LlCd_nSMkMmHESfiiiQYfnJyB6Pp-LSs4CEKdYW4vFk,1106
- ws_bom_robot_app/llm/main.py,sha256=fVXyS9TOu22ZC7M8o2mRCya9vTmMFf5jRgs9V0K_4cw,4189
+ ws_bom_robot_app/llm/main.py,sha256=tr7gPjSzO2w85ipSxWVcathNxJC3NKTdJKY89VhhKOM,4973
  ws_bom_robot_app/llm/settings.py,sha256=EkFGCppORenStH9W4e6_dYvQ-5p6xiEMpmUHBqNqG9M,117
  ws_bom_robot_app/llm/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ws_bom_robot_app/llm/models/api.py,sha256=mLbPG7jHh1EjgQG-xpBhEgiTIHpK35HZ51obgqQSfq4,8890
  ws_bom_robot_app/llm/models/base.py,sha256=1TqxuTK3rjJEALn7lvgoen_1ba3R2brAgGx6EDTtDZo,152
  ws_bom_robot_app/llm/models/kb.py,sha256=oVSw6_dmNxikAHrPqcfxDXz9M0ezLIYuxpgvzfs_Now,9514
  ws_bom_robot_app/llm/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ws_bom_robot_app/llm/providers/llm_manager.py,sha256=GUi5aHQX_4A4pRgv-XEBH42wGPBYW2HUeQeiU_j044E,7924
+ ws_bom_robot_app/llm/providers/llm_manager.py,sha256=vaSVbCKV5tbNg9cCVRa9KgeOoSqIbaSuG1DeIERl90M,8113
  ws_bom_robot_app/llm/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ws_bom_robot_app/llm/tools/tool_builder.py,sha256=OaA0jReNUpjfe7c8TVLM86acQ4w0cQaR3NE22hGKJb0,1165
  ws_bom_robot_app/llm/tools/tool_manager.py,sha256=RZcJVPyWT9D3HUxSO1d5kSfTQtJB2CG5hocuFa01AzY,5816
- ws_bom_robot_app/llm/tools/utils.py,sha256=SPC8pj2bt_xWO7wNR_5YBwUUvjJIK1xlavR4yfW4J-0,1320
+ ws_bom_robot_app/llm/tools/utils.py,sha256=tFrH6ehTAakyltAi9J6Pbqe8j1_alyV660fADhkNO-8,1322
  ws_bom_robot_app/llm/tools/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ws_bom_robot_app/llm/tools/models/main.py,sha256=o3Rwbn5nsugKOgLG0FUIuvtPPHYhfVpqG4E3BQB2nWM,388
  ws_bom_robot_app/llm/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -30,7 +30,7 @@ ws_bom_robot_app/llm/utils/agent.py,sha256=9fdnVMHpSEBdmYef6NFXtMIXa8EB4QQICfXsX
  ws_bom_robot_app/llm/utils/chunker.py,sha256=N7570xBYlObneg-fsvDhPAJ-Pv8C8OaYZOBK6q7LmMI,607
  ws_bom_robot_app/llm/utils/download.py,sha256=iAUxH_NiCpTPtGzhC4hBtxotd2HPFt2MBhttslIxqiI,3194
  ws_bom_robot_app/llm/utils/kb.py,sha256=jja45WCbNI7SGEgqDS99nErlwB5eY8Ga7BMnhdMHZ90,1279
- ws_bom_robot_app/llm/utils/print.py,sha256=ZonoLPcfM6Cpw4_Ec455LiCovExOwvnIgvw1QORSCBY,799
+ ws_bom_robot_app/llm/utils/print.py,sha256=IsPYEWRJqu-dqlJA3F9OnnIS4rOq_EYX1Ljp3BvDnww,774
  ws_bom_robot_app/llm/utils/secrets.py,sha256=-HtqLIDVIJrpvGC5YhPAVyLsq8P4ChVM5g3GOfdwqVk,878
  ws_bom_robot_app/llm/utils/webhooks.py,sha256=LAAZqyN6VhV13wu4X-X85TwdDgAV2rNvIwQFIIc0FJM,2114
  ws_bom_robot_app/llm/vector_store/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -60,7 +60,7 @@ ws_bom_robot_app/llm/vector_store/loader/__init__.py,sha256=47DEQpj8HBSa-_TImW-5
  ws_bom_robot_app/llm/vector_store/loader/base.py,sha256=L_ugekNuAq0N9O-24wtlHSNHkqSeD-KsJrfGt_FX9Oc,5340
  ws_bom_robot_app/llm/vector_store/loader/docling.py,sha256=yP0zgXLeFAlByaYuj-6cYariuknckrFds0dxdRcnVz8,3456
  ws_bom_robot_app/llm/vector_store/loader/json_loader.py,sha256=qo9ejRZyKv_k6jnGgXnu1W5uqsMMtgqK_uvPpZQ0p74,833
- ws_bom_robot_app-0.0.41.dist-info/METADATA,sha256=wfGSdQemB5CByJc9Hujsud2LmLXzKOddHTy_Z_pUPII,8348
- ws_bom_robot_app-0.0.41.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- ws_bom_robot_app-0.0.41.dist-info/top_level.txt,sha256=Yl0akyHVbynsBX_N7wx3H3ZTkcMLjYyLJs5zBMDAKcM,17
- ws_bom_robot_app-0.0.41.dist-info/RECORD,,
+ ws_bom_robot_app-0.0.43.dist-info/METADATA,sha256=zt3XihJLZb1XS5Nx2vh8nDzZO_9HS0nc_gFo90bONtg,8348
+ ws_bom_robot_app-0.0.43.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+ ws_bom_robot_app-0.0.43.dist-info/top_level.txt,sha256=Yl0akyHVbynsBX_N7wx3H3ZTkcMLjYyLJs5zBMDAKcM,17
+ ws_bom_robot_app-0.0.43.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.8.0)
+ Generator: setuptools (76.0.0)
  Root-Is-Purelib: true
  Tag: py3-none-any