ws-bom-robot-app 0.0.76__py3-none-any.whl → 0.0.78__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ws_bom_robot_app/llm/agent_handler.py

@@ -5,10 +5,9 @@ from langchain.callbacks.base import AsyncCallbackHandler
  from ws_bom_robot_app.llm.utils.print import print_json, print_string
  from typing import Any, Dict, List, Optional, Union
  from uuid import UUID
- import ws_bom_robot_app.llm.settings as settings
  from langchain_core.callbacks.base import AsyncCallbackHandler
  from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
- from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
+ from langchain_core.messages import BaseMessage, AIMessage
  import json, logging, re

  # Here is a custom handler that will print the tokens to stdout.
@@ -118,11 +117,6 @@ class AgentHandler(AsyncCallbackHandler):
      tags: List[str] = None,
      **kwargs: Any,
    ) -> None:
-     settings.chat_history.extend(
-       [
-         AIMessage(content=_parse_token(self.llm,finish.return_values["output"])),
-       ]
-     )
      # end-of-stream: flush any remaining text
      if self.in_json_block:
        try:
@@ -169,9 +163,4 @@ class RawAgentHandler(AsyncCallbackHandler):
      tags: List[str] = None,
      **kwargs: Any,
    ) -> None:
-     settings.chat_history.extend(
-       [
-         AIMessage(content=_parse_token(self.llm,finish.return_values["output"]))
-       ]
-     )
      await self.queue.put(None)
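Both handlers now stop mutating shared history on agent finish; the queue sentinel alone marks end-of-stream. A minimal sketch of that pattern (class and names are illustrative, not the package's API), assuming `langchain_core` is installed:

```python
import asyncio
from typing import Any
from langchain_core.callbacks.base import AsyncCallbackHandler

class QueueStreamHandler(AsyncCallbackHandler):
    """Streams tokens to an asyncio.Queue; None is the end-of-stream sentinel."""

    def __init__(self, queue: asyncio.Queue) -> None:
        self.queue = queue

    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        await self.queue.put(token)  # forward each token to the consumer

    async def on_agent_finish(self, finish: Any, **kwargs: Any) -> None:
        await self.queue.put(None)   # signal the consumer to stop reading
```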
ws_bom_robot_app/llm/api.py

@@ -21,9 +21,9 @@ async def root():
  async def _invoke(rq: InvokeRequest):
    return await invoke(rq)

- def _stream_headers(rq: StreamRequest) -> Mapping[str, str]:
+ def _rs_stream_headers(rq: StreamRequest) -> Mapping[str, str]:
    return {
-     "X-thread-id": rq.msg_id or str(uuid4()),
+     "X-thread-id": rq.thread_id or str(uuid4()),
      "X-msg-id": rq.msg_id or str(uuid4()),
    }

@@ -40,11 +40,11 @@ async def cms_app_by_id(id: str):

  @router.post("/stream")
  async def _stream(rq: StreamRequest, ctx: Request) -> StreamingResponse:
-   return StreamingResponse(stream(rq, ctx), media_type="application/json", headers=_stream_headers(rq))
+   return StreamingResponse(stream(rq, ctx), media_type="application/json", headers=_rs_stream_headers(rq))

  @router.post("/stream/raw")
  async def _stream_raw(rq: StreamRequest, ctx: Request) -> StreamingResponse:
-   return StreamingResponse(stream(rq, ctx, formatted=False), media_type="application/json", headers=_stream_headers(rq))
+   return StreamingResponse(stream(rq, ctx, formatted=False), media_type="application/json", headers=_rs_stream_headers(rq))

  @router.post("/kb")
  async def _kb(rq: KbRequest) -> VectorDbResponse:
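With the rename, `X-thread-id` is derived from `rq.thread_id` (previously it echoed `rq.msg_id`). A hedged client sketch using `httpx`; the base URL, route prefix, and payload are assumptions — a real `StreamRequest` needs the rest of the `LlmApp` fields, omitted here:

```python
import httpx

# hypothetical endpoint; "threadId"/"msgId" match StreamRequest's aliases
with httpx.stream(
    "POST",
    "http://localhost:8000/stream",
    json={"threadId": "t-123", "msgId": "m-456"},
) as rs:
    assert rs.headers["X-thread-id"] == "t-123"  # no longer the msg id
    for chunk in rs.iter_text():
        print(chunk, end="")
```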
ws_bom_robot_app/llm/main.py

@@ -3,7 +3,7 @@ import asyncio, json, logging, os, traceback, re
  from fastapi import Request
  from langchain.callbacks.tracers import LangChainTracer
  from langchain_core.callbacks.base import AsyncCallbackHandler
- from langchain_core.messages import AIMessage, HumanMessage
+ from langchain_core.messages import BaseMessage, AIMessage, HumanMessage
  from langsmith import Client as LangSmithClient
  from typing import AsyncGenerator, List
  from ws_bom_robot_app.config import config
@@ -14,7 +14,6 @@ from ws_bom_robot_app.llm.models.api import InvokeRequest, StreamRequest
  from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
  from ws_bom_robot_app.llm.tools.tool_builder import get_structured_tools
  from ws_bom_robot_app.llm.nebuly_handler import NebulyHandler
- import ws_bom_robot_app.llm.settings as settings

  async def invoke(rq: InvokeRequest) -> str:
    await rq.initialize()
@@ -40,21 +39,19 @@ def _parse_formatted_message(message: str) -> str:
    except:
      result = message
    return result
- async def __stream(rq: StreamRequest, ctx: Request, queue: Queue,formatted: bool = True) -> None:
-   await rq.initialize()
+ async def __stream(rq: StreamRequest, ctx: Request, queue: Queue, formatted: bool = True) -> None:
    #os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
-   if formatted:
-     agent_handler = AgentHandler(queue,rq.provider,rq.thread_id)
-   else:
-     agent_handler = RawAgentHandler(queue,rq.provider)
-   os.environ["AGENT_HANDLER_FORMATTED"] = str(formatted)
-   callbacks: List[AsyncCallbackHandler] = [agent_handler]
-   settings.init()

-   #CREATION OF CHAT HISTORY FOR AGENT
+   # rq initialization
+   await rq.initialize()
+   for tool in rq.app_tools:
+     tool.thread_id = rq.thread_id
+
+   #chat history
+   chat_history: list[BaseMessage] = []
    for message in rq.messages:
      if message.role in ["human","user"]:
-       settings.chat_history.append(HumanMessage(content=message.content))
+       chat_history.append(HumanMessage(content=message.content))
      elif message.role in ["ai","assistant"]:
        message_content = ""
        if formatted:
@@ -79,39 +76,53 @@ async def __stream(rq: StreamRequest, ctx: Request, queue: Queue,formatted: bool
      else:
        message_content = message.content
      if message_content:
-       settings.chat_history.append(AIMessage(content=message_content))
+       chat_history.append(AIMessage(content=message_content))
+
+   #llm
+   __llm: LlmInterface = rq.get_llm()
+
+   #agent handler
+   if formatted:
+     agent_handler = AgentHandler(queue, rq.provider, rq.thread_id)
+   else:
+     agent_handler = RawAgentHandler(queue, rq.provider)
+   #TODO: move from os.environ to rq
+   os.environ["AGENT_HANDLER_FORMATTED"] = str(formatted)

+   #callbacks
+   ## agent
+   callbacks: List[AsyncCallbackHandler] = [agent_handler]
+   ## langchain tracing
    if rq.lang_chain_tracing:
      client = LangSmithClient(
        api_key= rq.secrets.get("langChainApiKey", "")
      )
      trace = LangChainTracer(project_name=rq.lang_chain_project,client=client,tags=[str(ctx.base_url) if ctx else ''])
      callbacks.append(trace)
-
-   __llm: LlmInterface =rq.get_llm()
-   for tool in rq.app_tools:
-     tool.thread_id = rq.thread_id
-   processor = AgentLcel(
-     llm=__llm,
-     sys_message=rq.system_message,
-     sys_context=rq.system_context,
-     tools=get_structured_tools(__llm, tools=rq.app_tools, callbacks=[callbacks], queue=queue),
-     rules=rq.rules
-   )
+   ## nebuly tracing
    if rq.secrets.get("nebulyApiKey","") != "":
      user_id = rq.system_context.user.id if rq.system_context and rq.system_context.user and rq.system_context.user.id else None
      nebuly_callback = NebulyHandler(
        llm_model=__llm.config.model,
        threadId=rq.thread_id,
+       chat_history=chat_history,
        url=config.NEBULY_API_URL,
        api_key=rq.secrets.get("nebulyApiKey", None),
        user_id=user_id
      )
      callbacks.append(nebuly_callback)

+   # chain
+   processor = AgentLcel(
+     llm=__llm,
+     sys_message=rq.system_message,
+     sys_context=rq.system_context,
+     tools=get_structured_tools(__llm, tools=rq.app_tools, callbacks=[callbacks], queue=queue),
+     rules=rq.rules
+   )
    try:
      await processor.executor.ainvoke(
-       {"chat_history": settings.chat_history},
+       {"chat_history": chat_history},
        {"callbacks": callbacks},
      )
    except Exception as e:
@@ -122,7 +133,7 @@ async def __stream(rq: StreamRequest, ctx: Request, queue: Queue,formatted: bool
      await queue.put(_error)
      await queue.put(None)

-   # Signal the end of streaming
+   # signal the end of streaming
    await queue.put(None)

  async def stream(rq: StreamRequest, ctx: Request, formatted: bool = True) -> AsyncGenerator[str, None]:
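The net effect of the `__stream` refactor: chat history is built per call and handed to the handlers explicitly, so concurrent streams can no longer interleave through module state. A minimal sketch of the per-request pattern (simplified message shape, not the package's models):

```python
import asyncio
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage

async def build_history(messages: list[dict]) -> list[BaseMessage]:
    chat_history: list[BaseMessage] = []  # local to this request
    for m in messages:
        if m["role"] in ("human", "user"):
            chat_history.append(HumanMessage(content=m["content"]))
        elif m["role"] in ("ai", "assistant"):
            chat_history.append(AIMessage(content=m["content"]))
    return chat_history

async def main() -> None:
    # two concurrent "requests" keep fully independent histories
    h1, h2 = await asyncio.gather(
        build_history([{"role": "user", "content": "hello"}]),
        build_history([{"role": "user", "content": "ciao"}]),
    )
    assert h1[0].content == "hello" and h2[0].content == "ciao"

asyncio.run(main())
```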
ws_bom_robot_app/llm/models/api.py

@@ -169,8 +169,8 @@ class InvokeRequest(LlmApp):
    mode: str

  class StreamRequest(LlmApp):
-   thread_id: Optional[str] = Field(None, validation_alias=AliasChoices("threadId","thread_id"))
-   msg_id: Optional[str] = Field(None, validation_alias=AliasChoices("msgId","msg_id"))
+   thread_id: Optional[str] = Field(default=str(uuid.uuid4()), validation_alias=AliasChoices("threadId","thread_id"))
+   msg_id: Optional[str] = Field(default=str(uuid.uuid4()), validation_alias=AliasChoices("msgId","msg_id"))
  #endregion

  #region vector_db
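One caveat with the new defaults: `Field(default=str(uuid.uuid4()))` is evaluated once, when the class body executes, so every request that omits the field shares the same UUID for the process lifetime. A sketch (assuming Pydantic v2, with hypothetical model names) contrasting it with `default_factory`, which mints a fresh value per instance:

```python
import uuid
from typing import Optional
from pydantic import BaseModel, Field

class Shared(BaseModel):
    # evaluated once at class definition: all instances get the same id
    thread_id: Optional[str] = Field(default=str(uuid.uuid4()))

class PerInstance(BaseModel):
    # evaluated on each instantiation: every instance gets a fresh id
    thread_id: Optional[str] = Field(default_factory=lambda: str(uuid.uuid4()))

a, b = Shared(), Shared()
assert a.thread_id == b.thread_id    # same default for both

c, d = PerInstance(), PerInstance()
assert c.thread_id != d.thread_id    # fresh uuid each time
```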
ws_bom_robot_app/llm/nebuly_handler.py

@@ -2,16 +2,16 @@ from typing import Union
  from ws_bom_robot_app.llm.models.api import NebulyInteraction, NebulyLLMTrace, NebulyRetrievalTrace
  from datetime import datetime, timezone
  from langchain_core.callbacks.base import AsyncCallbackHandler
- import ws_bom_robot_app.llm.settings as settings
  from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
  from langchain_core.outputs import ChatGenerationChunk, GenerationChunk

  class NebulyHandler(AsyncCallbackHandler):
-   def __init__(self, llm_model: str | None, threadId: str = None, url: str = None, api_key: str = None, user_id: str | None = None):
+   def __init__(self, llm_model: str | None, threadId: str = None, chat_history: list[BaseMessage] = [], url: str = None, api_key: str = None, user_id: str | None = None):
      super().__init__()
      self.__started: bool = False
      self.__url: str = url
      self.__api_key: str = api_key
+     self.chat_history = chat_history
      self.interaction = NebulyInteraction(
        conversation_id=threadId,
        input="",
@@ -76,7 +76,7 @@ class NebulyHandler(AsyncCallbackHandler):
      self.interaction.output = finish.return_values["output"]
      # Trace
      self.llm_trace.output = finish.return_values["output"]
-     message_history = self._convert_to_json_format(settings.chat_history)[:-1]
+     message_history = self._convert_to_json_format(self.chat_history)
      self.llm_trace.messages = self.__parse_multimodal_history(message_history)
      await self.__send_interaction()

@@ -153,7 +153,7 @@ class NebulyHandler(AsyncCallbackHandler):
          parsed_input += item.get("text", "")
        elif item.get("type") == "image_url":
          parsed_input += " <image>"
-     print(parsed_input)
+     # print(parsed_input)
      return parsed_input

    def __parse_multimodal_history(self, messages: list[dict]) -> list[dict]:
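One Python caveat in the new `__init__` signature: a mutable default like `chat_history: list[BaseMessage] = []` is created once and shared by every call that omits the argument. The call site in this release always passes `chat_history` explicitly, so it is harmless here; a hedged sketch of the usual defensive pattern (hypothetical class name):

```python
from langchain_core.messages import BaseMessage

class Handler:
    def __init__(self, chat_history: list[BaseMessage] | None = None):
        # avoid the shared-mutable-default pitfall: fresh list per instance
        self.chat_history = chat_history if chat_history is not None else []
```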
ws_bom_robot_app-0.0.78.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ws_bom_robot_app
- Version: 0.0.76
+ Version: 0.0.78
  Summary: A FastAPI application serving ws bom/robot/llm platform ai.
  Home-page: https://github.com/websolutespa/bom
  Author: Websolute Spa
@@ -25,7 +25,7 @@ Requires-Dist: langchain-anthropic==0.3.6
  Requires-Dist: langchain-ibm==0.3.14
  Requires-Dist: langchain-google-genai==2.0.7
  Requires-Dist: langchain-google-vertexai==2.0.27
- Requires-Dist: langchain-groq==0.3.5
+ Requires-Dist: langchain-groq==0.3.6
  Requires-Dist: langchain-ollama==0.3.3
  Requires-Dist: faiss-cpu==1.11.0
  Requires-Dist: chromadb==1.0.15
ws_bom_robot_app-0.0.78.dist-info/RECORD

@@ -8,17 +8,16 @@ ws_bom_robot_app/util.py,sha256=RjVD6B9sHje788Lndqq5DHy6TJM0KLs9qx3JYt81Wyk,4834
  ws_bom_robot_app/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ws_bom_robot_app/llm/agent_context.py,sha256=uatHJ8wcRly6h0S762BgfzDMpmcwCHwNzwo37aWjeE0,1305
  ws_bom_robot_app/llm/agent_description.py,sha256=yK4aVU3RNk1oP4bEneV3QPAi-208JwWk4R6qHlzqYIg,4656
- ws_bom_robot_app/llm/agent_handler.py,sha256=BQ-f--Z5QCJDp-7tzSG_CKrANUCqG65S09psgWVNxa4,7597
+ ws_bom_robot_app/llm/agent_handler.py,sha256=TnpfChHLWVQ-gCEHNQPW3UXiuS8AmiP8JYwRz9pqbCg,7203
  ws_bom_robot_app/llm/agent_lcel.py,sha256=tVa1JJOuL1CG0tXS5AwOB4gli0E2rGqSBD5oEehHvOY,2480
- ws_bom_robot_app/llm/api.py,sha256=2bF-UFczY9LuBqPxKObM0TOWYbZgVztX1RiIz5MSorU,5042
+ ws_bom_robot_app/llm/api.py,sha256=EUllZaJvtm1CQPxJ6QfufpBhZG_-ew8gSK-vxVg5r58,5054
  ws_bom_robot_app/llm/defaut_prompt.py,sha256=LlCd_nSMkMmHESfiiiQYfnJyB6Pp-LSs4CEKdYW4vFk,1106
- ws_bom_robot_app/llm/main.py,sha256=vzUfaLCRk2SYujD00hnrTiHEVLYgZcbSw6LUea43siU,5235
- ws_bom_robot_app/llm/nebuly_handler.py,sha256=d4TI5XbvIYJLCxHcCUa6QUxsgwKW_4ItCYe4ocn7IJo,7900
- ws_bom_robot_app/llm/settings.py,sha256=EkFGCppORenStH9W4e6_dYvQ-5p6xiEMpmUHBqNqG9M,117
+ ws_bom_robot_app/llm/main.py,sha256=BXTIfVc9Ck7izZ893qry7C_uz1A8ZupbcHivrZrjpxY,5372
+ ws_bom_robot_app/llm/nebuly_handler.py,sha256=hbkiTc0Jl4EzwXltpICiUXM5i5wOsmEX_Chyr1NhvSc,7924
  ws_bom_robot_app/llm/feedbacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ws_bom_robot_app/llm/feedbacks/feedback_manager.py,sha256=WcKgzlOb8VFG7yqHoIOO_R6LAzdzE4YIRFCVOGBSgfM,2856
  ws_bom_robot_app/llm/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ws_bom_robot_app/llm/models/api.py,sha256=8hllPUnPM6Rp6TgcvTQtpUh5Nk36r1qcvIEc2O7LZJE,10968
+ ws_bom_robot_app/llm/models/api.py,sha256=qr9BO3NqQ4juYcJGARPiKas5Mj2gIg6gUH9MpmDkMLE,11010
  ws_bom_robot_app/llm/models/base.py,sha256=1TqxuTK3rjJEALn7lvgoen_1ba3R2brAgGx6EDTtDZo,152
  ws_bom_robot_app/llm/models/feedback.py,sha256=zh1jLqPRLzNlxInkCMoiJbfSu0-tiOEYHM7FhC46PkM,1692
  ws_bom_robot_app/llm/models/kb.py,sha256=oVSw6_dmNxikAHrPqcfxDXz9M0ezLIYuxpgvzfs_Now,9514
@@ -67,7 +66,7 @@ ws_bom_robot_app/llm/vector_store/loader/__init__.py,sha256=47DEQpj8HBSa-_TImW-5
  ws_bom_robot_app/llm/vector_store/loader/base.py,sha256=L_ugekNuAq0N9O-24wtlHSNHkqSeD-KsJrfGt_FX9Oc,5340
  ws_bom_robot_app/llm/vector_store/loader/docling.py,sha256=yP0zgXLeFAlByaYuj-6cYariuknckrFds0dxdRcnVz8,3456
  ws_bom_robot_app/llm/vector_store/loader/json_loader.py,sha256=qo9ejRZyKv_k6jnGgXnu1W5uqsMMtgqK_uvPpZQ0p74,833
- ws_bom_robot_app-0.0.76.dist-info/METADATA,sha256=e6QyzVw9spgo-qPmhDmX-O5oYtDvGN_zJLnhy2gHTuk,8609
- ws_bom_robot_app-0.0.76.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- ws_bom_robot_app-0.0.76.dist-info/top_level.txt,sha256=Yl0akyHVbynsBX_N7wx3H3ZTkcMLjYyLJs5zBMDAKcM,17
- ws_bom_robot_app-0.0.76.dist-info/RECORD,,
+ ws_bom_robot_app-0.0.78.dist-info/METADATA,sha256=96-Ate6TbTUzRqNCLEe6gJEblJOA4r9BoVY6Ajbb2_4,8609
+ ws_bom_robot_app-0.0.78.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ ws_bom_robot_app-0.0.78.dist-info/top_level.txt,sha256=Yl0akyHVbynsBX_N7wx3H3ZTkcMLjYyLJs5zBMDAKcM,17
+ ws_bom_robot_app-0.0.78.dist-info/RECORD,,
ws_bom_robot_app/llm/settings.py (deleted)

@@ -1,4 +0,0 @@
- def init():
-   """Initialize the chat history list as a global var"""
-   global chat_history
-   chat_history = []
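The deleted module is what made the old design fragile: `init()` rebound one module-level list shared by every coroutine in the process. A toy reproduction of the cross-talk (hypothetical names, not the package's code):

```python
import asyncio

chat_history: list[str] = []  # module-level, shared by all tasks

async def stream(label: str) -> list[str]:
    chat_history.clear()       # what settings.init() effectively did
    chat_history.append(label)
    await asyncio.sleep(0)     # yield: another request can run here
    return list(chat_history)

async def main() -> None:
    a, b = await asyncio.gather(stream("req-a"), stream("req-b"))
    print(a, b)  # both see "req-b": histories bleed across requests

asyncio.run(main())
```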