ws-bom-robot-app 0.0.52__tar.gz → 0.0.55__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. {ws_bom_robot_app-0.0.52/ws_bom_robot_app.egg-info → ws_bom_robot_app-0.0.55}/PKG-INFO +2 -2
  2. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/requirements.txt +1 -3
  3. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/setup.py +1 -1
  4. ws_bom_robot_app-0.0.55/ws_bom_robot_app/llm/agent_context.py +26 -0
  5. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/agent_lcel.py +5 -7
  6. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/api.py +2 -2
  7. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/main.py +8 -6
  8. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/models/api.py +25 -2
  9. ws_bom_robot_app-0.0.55/ws_bom_robot_app/llm/nebuly_handler.py +163 -0
  10. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/providers/llm_manager.py +5 -3
  11. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/tools/tool_manager.py +2 -1
  12. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/utils/agent.py +11 -1
  13. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/db/base.py +3 -3
  14. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55/ws_bom_robot_app.egg-info}/PKG-INFO +2 -2
  15. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app.egg-info/SOURCES.txt +2 -0
  16. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app.egg-info/requires.txt +1 -1
  17. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/MANIFEST.in +0 -0
  18. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/README.md +0 -0
  19. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/pyproject.toml +0 -0
  20. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/setup.cfg +0 -0
  21. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/__init__.py +0 -0
  22. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/auth.py +0 -0
  23. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/config.py +0 -0
  24. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/cron_manager.py +0 -0
  25. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/__init__.py +0 -0
  26. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/agent_description.py +0 -0
  27. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/agent_handler.py +0 -0
  28. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/defaut_prompt.py +0 -0
  29. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/models/__init__.py +0 -0
  30. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/models/base.py +0 -0
  31. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/models/kb.py +0 -0
  32. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/providers/__init__.py +0 -0
  33. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/settings.py +0 -0
  34. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/tools/__init__.py +0 -0
  35. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/tools/models/__init__.py +0 -0
  36. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/tools/models/main.py +0 -0
  37. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/tools/tool_builder.py +0 -0
  38. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/tools/utils.py +0 -0
  39. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/utils/__init__.py +0 -0
  40. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/utils/chunker.py +0 -0
  41. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/utils/download.py +0 -0
  42. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/utils/kb.py +0 -0
  43. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/utils/print.py +0 -0
  44. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/utils/secrets.py +0 -0
  45. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/utils/webhooks.py +0 -0
  46. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/__init__.py +0 -0
  47. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/db/__init__.py +0 -0
  48. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/db/chroma.py +0 -0
  49. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/db/faiss.py +0 -0
  50. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/db/manager.py +0 -0
  51. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/db/qdrant.py +0 -0
  52. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/generator.py +0 -0
  53. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/__init__.py +0 -0
  54. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/azure.py +0 -0
  55. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/base.py +0 -0
  56. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/confluence.py +0 -0
  57. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/dropbox.py +0 -0
  58. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/gcs.py +0 -0
  59. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/github.py +0 -0
  60. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/googledrive.py +0 -0
  61. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/jira.py +0 -0
  62. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/manager.py +0 -0
  63. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/s3.py +0 -0
  64. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/sftp.py +0 -0
  65. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/sharepoint.py +0 -0
  66. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/sitemap.py +0 -0
  67. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/integration/slack.py +0 -0
  68. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/loader/__init__.py +0 -0
  69. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/loader/base.py +0 -0
  70. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/loader/docling.py +0 -0
  71. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/loader/json_loader.py +0 -0
  72. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/main.py +0 -0
  73. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/task_manager.py +0 -0
  74. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/util.py +0 -0
  75. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app.egg-info/dependency_links.txt +0 -0
  76. {ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app.egg-info/top_level.txt +0 -0
{ws_bom_robot_app-0.0.52/ws_bom_robot_app.egg-info → ws_bom_robot_app-0.0.55}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ws_bom_robot_app
-Version: 0.0.52
+Version: 0.0.55
 Summary: A FastAPI application serving ws bom/robot/llm platform ai.
 Home-page: https://github.com/websolutespa/bom
 Author: Websolute Spa
@@ -16,6 +16,7 @@ Requires-Dist: aiofiles==24.1.0
 Requires-Dist: pydantic==2.10.6
 Requires-Dist: pydantic-settings==2.7.1
 Requires-Dist: fastapi[standard]==0.115.8
+Requires-Dist: chevron==0.14.0
 Requires-Dist: langchain==0.3.18
 Requires-Dist: langchain-community==0.3.17
 Requires-Dist: langchain-core==0.3.34
@@ -47,7 +48,6 @@ Requires-Dist: unstructured-ingest[sharepoint]
 Requires-Dist: unstructured-ingest[slack]
 Requires-Dist: html5lib==1.1
 Requires-Dist: markdownify==0.14.1
-Requires-Dist: nebuly==0.3.38
 Dynamic: author
 Dynamic: author-email
 Dynamic: classifier
{ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/requirements.txt

@@ -5,6 +5,7 @@ aiofiles==24.1.0
 pydantic==2.10.6
 pydantic-settings==2.7.1
 fastapi[standard]==0.115.8
+chevron==0.14.0
 
 #framework
 langchain==0.3.18
@@ -45,6 +46,3 @@ html5lib==1.1 #beautifulsoup4 parser
 
 #integrations
 markdownify==0.14.1 #sitemap
-
-#telemetry
-nebuly==0.3.38
{ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/setup.py

@@ -4,7 +4,7 @@ _requirements = [line.split('#')[0].strip() for line in open("requirements.txt")
 
 setup(
   name="ws_bom_robot_app",
-  version="0.0.52",
+  version="0.0.55",
   description="A FastAPI application serving ws bom/robot/llm platform ai.",
   long_description=open("README.md", encoding='utf-8').read(),
   long_description_content_type="text/markdown",
ws_bom_robot_app-0.0.55/ws_bom_robot_app/llm/agent_context.py (new file)

@@ -0,0 +1,26 @@
+import uuid
+from datetime import datetime
+from pydantic import AliasChoices, BaseModel, ConfigDict, Field
+from typing import Optional
+
+class AgentContext(BaseModel):
+  class _i18n(BaseModel):
+    lg: Optional[str] = "en"
+    country: Optional[str] = "US"
+    timestamp: Optional[str] = Field(default_factory=lambda: datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+    timezone: Optional[str] = "UTC"
+    model_config = ConfigDict(extra='allow')
+  class _user(BaseModel):
+    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
+    first_name: Optional[str] = Field(None, validation_alias=AliasChoices("firstName","first_name"))
+    last_name: Optional[str] = Field(None, validation_alias=AliasChoices("lastName","last_name"))
+    country: Optional[str] = ''
+    email: Optional[str] = ''
+    phone: Optional[str] = ''
+    role: Optional[list] = [] #i.e. ["admin","user","guest"]
+    department: Optional[list] = [] #i.e. ["R&D","IT","HR"]
+    permission: Optional[list] = [] #i.e. ["read","write","delete","execute"]
+    model_config = ConfigDict(extra='allow')
+  i18n: _i18n = Field(default_factory=_i18n)
+  user: Optional[_user] = Field(default_factory=_user)
+  model_config = ConfigDict(extra='allow')
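
AgentContext is a permissive envelope: every level sets extra='allow' and the user fields accept camelCase aliases, so arbitrary caller-supplied context survives validation and becomes available to the prompt template. A quick sketch of what that implies (field values illustrative):

    from ws_bom_robot_app.llm.agent_context import AgentContext

    ctx = AgentContext.model_validate({
        "i18n": {"lg": "it", "country": "IT"},
        "user": {"firstName": "Ada", "role": ["admin"]},  # camelCase alias accepted
        "tenant": "acme",                                 # unknown key kept via extra='allow'
    })
    print(ctx.user.first_name)  # "Ada"
    print(ctx.i18n.timestamp)   # set at instantiation time via default_factory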
{ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/agent_lcel.py

@@ -1,9 +1,10 @@
-from typing import Any
+from typing import Any, Optional
 from langchain.agents import AgentExecutor, create_tool_calling_agent
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain_core.runnables import RunnableLambda
 from langchain_core.tools import render_text_description
-from datetime import datetime
+import chevron
+from ws_bom_robot_app.llm.agent_context import AgentContext
 from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
 from ws_bom_robot_app.llm.models.api import LlmMessage, LlmRules
 from ws_bom_robot_app.llm.utils.agent import get_rules
@@ -11,11 +12,8 @@ from ws_bom_robot_app.llm.defaut_prompt import default_prompt, tool_prompt
 
 class AgentLcel:
 
-  def __init__(self, llm: LlmInterface, sys_message: str, tools: list, rules: LlmRules = None):
-    self.sys_message = sys_message.format(
-      date_stamp=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
-      lang="it",
-    )
+  def __init__(self, llm: LlmInterface, sys_message: str, sys_context: AgentContext, tools: list, rules: LlmRules = None):
+    self.sys_message = chevron.render(template=sys_message, data=sys_context)
     self.__llm = llm
     self.__tools = tools
     self.rules = rules
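
The hard-coded str.format call (which always injected lang="it") gives way to mustache rendering over the whole AgentContext, so system messages can reference any context field with {{...}} tags. A sketch with an illustrative template (chevron resolves dotted names and falls back to attribute access on objects, and unresolved tags render as empty strings rather than raising):

    import chevron
    from ws_bom_robot_app.llm.agent_context import AgentContext

    sys_message = (
        "You are a helpful assistant.\n"
        "Answer in {{i18n.lg}}; current time {{i18n.timestamp}} ({{i18n.timezone}}).\n"
        "User: {{user.first_name}} {{user.last_name}}"
    )
    print(chevron.render(template=sys_message, data=AgentContext()))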
{ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/api.py

@@ -22,8 +22,8 @@ async def _invoke(rq: InvokeRequest):
 
 def _stream_headers(rq: StreamRequest) -> Mapping[str, str]:
   return {
-    "X-thread-id": rq.thread_id,
-    "X-msg-id": rq.msg_id,
+    "X-thread-id": rq.msg_id or str(uuid4()),
+    "X-msg-id": rq.msg_id or str(uuid4()),
   }
 @router.post("/stream")
 async def _stream(rq: StreamRequest, ctx: Request) -> StreamingResponse:
{ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/main.py

@@ -5,7 +5,6 @@ from langchain.callbacks.tracers import LangChainTracer
 from langchain_core.callbacks.base import AsyncCallbackHandler
 from langchain_core.messages import AIMessage, HumanMessage
 from langsmith import Client as LangSmithClient
-from nebuly.providers.langchain import LangChainTrackingHandler
 from typing import AsyncGenerator, List
 from ws_bom_robot_app.config import config
 from ws_bom_robot_app.llm.agent_description import AgentDescriptor
@@ -14,6 +13,7 @@ from ws_bom_robot_app.llm.agent_lcel import AgentLcel
 from ws_bom_robot_app.llm.models.api import InvokeRequest, StreamRequest
 from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
 from ws_bom_robot_app.llm.tools.tool_builder import get_structured_tools
+from ws_bom_robot_app.llm.nebuly_handler import NebulyHandler
 import ws_bom_robot_app.llm.settings as settings
 
 async def invoke(rq: InvokeRequest) -> str:
@@ -92,15 +92,17 @@ async def __stream(rq: StreamRequest, ctx: Request, queue: Queue, formatted: bool
   processor = AgentLcel(
     llm=__llm,
     sys_message=rq.system_message,
+    sys_context=rq.system_context,
     tools=get_structured_tools(__llm, tools=rq.app_tools, callbacks=[callbacks], queue=queue),
     rules=rq.rules
   )
   if rq.secrets.get("nebulyApiKey","") != "":
-    nebuly_callback = LangChainTrackingHandler(
-      api_key=rq.secrets.get("nebulyApiKey"),
-      user_id=rq.thread_id,
-      nebuly_tags={"project": rq.lang_chain_project},
-    )
+    nebuly_callback = NebulyHandler(
+      llm_model=__llm.config.model,
+      threadId=rq.thread_id,
+      url=config.NEBULY_API_URL,
+      api_key=rq.secrets.get("nebulyApiKey", None),
+    )
     callbacks.append(nebuly_callback)
 
   try:
{ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/models/api.py

@@ -3,6 +3,7 @@ from datetime import datetime
 from pydantic import AliasChoices, BaseModel, Field, ConfigDict
 from langchain_core.embeddings import Embeddings
 from langchain.chains.query_constructor.schema import AttributeInfo
+from ws_bom_robot_app.llm.agent_context import AgentContext
 from ws_bom_robot_app.llm.models.kb import LlmKbEndpoint, LlmKbIntegration
 from ws_bom_robot_app.llm.providers.llm_manager import LlmManager, LlmConfig, LlmInterface
 from ws_bom_robot_app.llm.utils.download import download_file
@@ -74,11 +75,33 @@ class LlmAppTool(BaseModel):
     extra='allow'
   )
 
+class NebulyInteraction(BaseModel):
+  conversation_id: str = Field(..., description="Unique identifier for grouping related interactions")
+  input: str = Field(..., description="User input text in the interaction")
+  output: str = Field(..., description="LLM response shown to the user")
+  time_start: str = Field(..., description="ISO 8601 formatted start time of the LLM call")
+  time_end: str = Field(..., description="ISO 8601 formatted end time of the LLM call")
+  end_user: str = Field(..., description="Unique identifier for the end user (recommended: hashed username/email or thread_id)")
+  tags: Optional[Dict[str, str]] = Field(default=None, description="Custom key-value pairs for tagging interactions")
+
+class NebulyLLMTrace(BaseModel):
+  model: str = Field(..., description="The name of the LLM model used for the interaction")
+  messages: List[LlmMessage] = Field(..., description="List of messages exchanged during the interaction")
+  output: str = Field(..., description="The final output generated by the LLM")
+  input_tokens: Optional[int] = Field(..., description="Number of tokens in the input messages")
+  output_tokens: Optional[int] = Field(..., description="Number of tokens in the output message")
+
+class NebulyRetrievalTrace(BaseModel):
+  source: Union[str, None] = Field(..., description="The source of the retrieved documents")
+  input: str = Field(..., description="The input query used for retrieval")
+  outputs: List[str] = Field(..., description="List of retrieved document contents")
+
 #region llm public endpoints
 
 #region api
 class LlmApp(BaseModel):
   system_message: str = Field(..., validation_alias=AliasChoices("systemMessage","system_message"))
+  system_context: Optional[AgentContext] = Field(AgentContext(), validation_alias=AliasChoices("systemContext","system_context"))
   messages: List[LlmMessage]
   provider: Optional[str] = "openai"
   model: Optional[str] = None
@@ -139,8 +162,8 @@ class InvokeRequest(LlmApp):
   mode: str
 
 class StreamRequest(LlmApp):
-  thread_id: Optional[str] = Field(str(uuid.uuid4()), validation_alias=AliasChoices("threadId","thread_id"))
-  msg_id: Optional[str] = Field(str(uuid.uuid4()), validation_alias=AliasChoices("msgId","msg_id"))
+  thread_id: Optional[str] = Field(None, validation_alias=AliasChoices("threadId","thread_id"))
+  msg_id: Optional[str] = Field(None, validation_alias=AliasChoices("msgId","msg_id"))
 #endregion
 
 #region vector_db
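
The old StreamRequest defaults called str(uuid.uuid4()) once, at class-definition time, so every request that omitted threadId/msgId shared the same "unique" identifier for the life of the process; the None default defers generation to request time (the "or str(uuid4())" fallback in api.py above). A minimal reproduction of the pitfall:

    import uuid
    from typing import Optional
    from pydantic import BaseModel, Field

    class Old(BaseModel):
        # evaluated once when the class is defined: one id shared by all instances
        msg_id: Optional[str] = Field(str(uuid.uuid4()))

    class New(BaseModel):
        # fresh value per instance; None plus a request-time fallback behaves the same
        msg_id: Optional[str] = Field(default_factory=lambda: str(uuid.uuid4()))

    assert Old().msg_id == Old().msg_id
    assert New().msg_id != New().msg_id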
ws_bom_robot_app-0.0.55/ws_bom_robot_app/llm/nebuly_handler.py (new file)

@@ -0,0 +1,163 @@
+from typing import Union
+from ws_bom_robot_app.llm.models.api import NebulyInteraction, NebulyLLMTrace, NebulyRetrievalTrace
+from datetime import datetime, timezone
+from langchain_core.callbacks.base import AsyncCallbackHandler
+import ws_bom_robot_app.llm.settings as settings
+from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
+from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
+from uuid import UUID
+
+class NebulyHandler(AsyncCallbackHandler):
+  def __init__(self, llm_model: str | None, threadId: str = None, url: str = None, api_key: str = None):
+    super().__init__()
+    self.__started: bool = False
+    self.__url: str = url
+    self.__api_key: str = api_key
+    self.interaction = NebulyInteraction(
+      conversation_id=threadId,
+      input="",
+      output="",
+      time_start="",
+      time_end="",
+      end_user=threadId,
+      tags={"model": llm_model},
+    )
+    self.llm_trace = NebulyLLMTrace(
+      model=llm_model,
+      messages=[],
+      output="",
+      input_tokens=0,
+      output_tokens=0,
+    )
+    self.retrieval_trace = NebulyRetrievalTrace(
+      source=None,
+      input="",
+      outputs=[],
+    )
+
+  async def on_chat_model_start(self, serialized, messages, *, run_id, parent_run_id = None, tags = None, metadata = None, **kwargs):
+    # Initialize the interaction with the input message
+    if not self.__started:
+      message_list = self.__flat_messages(messages)
+      if isinstance(message_list[-1], HumanMessage):
+        if isinstance(message_list[-1].content, list):
+          self.interaction.input = self.__parse_multimodal_input(message_list[-1].content)
+        else:
+          self.interaction.input = message_list[-1].content
+      else:
+        raise ValueError("Last message is not a HumanMessage")
+      self.interaction.time_start = datetime.now().astimezone().isoformat()
+      self.__started = True
+
+  async def on_llm_end(self, response, *, run_id, parent_run_id = None, tags = None, **kwargs):
+    generation: Union[ChatGenerationChunk, GenerationChunk] = response.generations[0]
+    usage_metadata: dict = generation[0].message.usage_metadata
+    self.llm_trace.input_tokens = usage_metadata.get("input_tokens", 0)
+    self.llm_trace.output_tokens = usage_metadata.get("output_tokens", 0)
+
+  async def on_retriever_start(self, serialized, query, *, run_id, parent_run_id = None, tags = None, metadata = None, **kwargs):
+    if metadata.get("source"):
+      self.retrieval_trace.input = query
+      self.retrieval_trace.source = metadata.get("source", "retriever")
+
+  async def on_retriever_end(self, documents, *, run_id, parent_run_id = None, tags = None, **kwargs):
+    # pass the document source because of the large amount of data in the document content
+    self.retrieval_trace.outputs.extend([doc.metadata.get("source", "content unavailable") for doc in documents])
+
+  async def on_agent_finish(self, finish, *, run_id, parent_run_id = None, tags = None, **kwargs):
+    # Interaction
+    self.interaction.output = finish.return_values["output"]
+    # Trace
+    self.llm_trace.output = finish.return_values["output"]
+    message_history = self._convert_to_json_format(settings.chat_history)[:-1]
+    self.llm_trace.messages = self.__parse_multimodal_history(message_history)
+    await self.__send_interaction()
+
+  def __flat_messages(self, messages: list[list[BaseMessage]], to_json: bool = False) -> list[BaseMessage]:
+    """
+    Maps the messages to the format expected by the LLM.
+    Flattens the nested list structure of messages.
+    """
+    # Flatten the nested list structure
+    flattened_messages = []
+    for message_list in messages:
+      flattened_messages.extend(message_list)
+    # Store JSON format in LLM trace
+    if to_json:
+      return self._convert_to_json_format(flattened_messages)
+    return flattened_messages
+
+  def _convert_to_json_format(self, messages: list[BaseMessage]) -> list[dict]:
+    """Converts BaseMessage objects to JSON format with role and content."""
+    result = []
+    for message in messages:
+      if isinstance(message, HumanMessage):
+        role = "user"
+      elif isinstance(message, AIMessage):
+        role = "assistant"
+      else:
+        role = "system"
+
+      result.append({
+        "role": role,
+        "content": message.content
+      })
+    return result
+
+  async def __send_interaction(self):
+    # Send the interaction to the server
+    from urllib.parse import urljoin
+    import requests
+
+    payload = self.__prepare_payload()
+    endpoint = urljoin(self.__url, "event-ingestion/api/v2/events/trace_interaction")
+    # Prepare headers with authentication
+    headers = {"Content-Type": "application/json"}
+    if self.__api_key:
+      headers["Authorization"] = f"Bearer {self.__api_key}"
+    response = requests.post(
+      url=endpoint,
+      json=payload,
+      headers=headers
+    )
+    if response.status_code != 200:
+      print(f"Failed to send interaction: {response.status_code} {response.text}")
+
+  def __prepare_payload(self):
+    self.interaction.time_end = datetime.now().astimezone().isoformat()
+    payload = {
+      "interaction": self.interaction.__dict__,
+      "traces": [
+        self.llm_trace.__dict__,
+      ]
+    }
+    if self.retrieval_trace.source:
+      payload["traces"].append(self.retrieval_trace.__dict__)
+    return payload
+
+  def __parse_multimodal_input(self, input: list[dict]) -> str:
+    # Parse the multimodal input and return a string representation
+    # This is a placeholder implementation, you can customize it as needed
+    parsed_input = ""
+    for item in input:
+      if item.get("type") == "text":
+        parsed_input += item.get("text", "")
+      elif item.get("type") == "image_url":
+        parsed_input += " <image>"
+    print(parsed_input)
+    return parsed_input
+
+  def __parse_multimodal_history(self, messages: list[dict]) -> list[dict]:
+    # Parse the multimodal history and return a list of dictionaries
+    parsed_history = []
+    for message in messages:
+      if isinstance(message["content"], list):
+        parsed_content = self.__parse_multimodal_input(message["content"])
+      else:
+        parsed_content = message["content"]
+      parsed_history.append({
+        "role": message["role"],
+        "content": parsed_content
+      })
+    return parsed_history
+
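
For orientation, this is roughly how the handler is exercised (mirroring the main.py wiring above; the URL and key below are placeholders, the real base URL comes from config.NEBULY_API_URL):

    from ws_bom_robot_app.llm.nebuly_handler import NebulyHandler

    handler = NebulyHandler(
        llm_model="gpt-4o",             # illustrative model name
        threadId="thread-123",          # doubles as conversation_id and end_user
        url="https://nebuly.example/",  # placeholder base URL
        api_key="nb-...",               # placeholder key
    )
    # Registered as a LangChain callback, the handler accumulates the user input,
    # final output, token usage and retrieval sources across the run, then
    # on_agent_finish POSTs {"interaction": ..., "traces": [...]} to
    # <url>event-ingestion/api/v2/events/trace_interaction.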
{ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/providers/llm_manager.py

@@ -46,7 +46,8 @@ class OpenAI(LlmInterface):
     from langchain_openai import ChatOpenAI
     chat = ChatOpenAI(
       api_key=self.config.api_key or os.getenv("OPENAI_API_KEY"),
-      model=self.config.model)
+      model=self.config.model,
+      stream_usage=True)
     if not any(self.config.model.startswith(prefix) for prefix in ["o1", "o3"]):
       chat.temperature = self.config.temperature
       chat.streaming = True
@@ -68,6 +69,7 @@ class DeepSeek(LlmInterface):
       max_tokens=8192,
       temperature=self.config.temperature,
       streaming=True,
+      stream_usage=True,
     )
 
   def get_models(self):
@@ -85,7 +87,7 @@ class Google(LlmInterface):
       api_key=self.config.api_key or os.getenv("GOOGLE_API_KEY"),
       model=self.config.model,
       temperature=self.config.temperature,
-      disable_streaming=False
+      disable_streaming=False,
     )
 
   def get_embeddings(self):
@@ -138,7 +140,7 @@ class Anthropic(LlmInterface):
       model=self.config.model,
       temperature=self.config.temperature,
       streaming=True,
-      stream_usage=False
+      stream_usage=True
     )
 
 """
{ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/tools/tool_manager.py

@@ -94,7 +94,8 @@ class ToolManager:
         search_type,
         search_kwargs,
         app_tool=self.app_tool,
-        llm=self.llm.get_llm()
+        llm=self.llm.get_llm(),
+        source=self.app_tool.function_id,
       )
     return []
     #raise ValueError(f"Invalid configuration for {self.settings.name} tool of type {self.settings.type}. Must be a function or vector db not found.")
{ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/utils/agent.py

@@ -16,7 +16,17 @@ async def get_rules(embeddings: Embeddings, rules: LlmRules, input: str | list)
     return ""
   # get the rules from the vector db and return prompt with rules
   rules_prompt = ""
-  rules_doc = await VectorDbManager.get_strategy(rules.vector_type).invoke(embeddings, rules.vector_db, input, search_type="similarity_score_threshold", search_kwargs={"score_threshold": rules.threshold}) #type: ignore
+  rules_doc = await VectorDbManager.get_strategy(rules.vector_type).invoke(
+    embeddings,
+    rules.vector_db,
+    input,
+    search_type="similarity_score_threshold",
+    search_kwargs={
+      "score_threshold": rules.threshold,
+      "k": 500,
+      "fetch_k": 500,
+    },
+    source=None) #type: ignore
   if len(rules_doc) > 0:
     rules_prompt = "\nFollow this rules: \n RULES: \n"
     for rule_doc in rules_doc:
{ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app/llm/vector_store/db/base.py

@@ -117,7 +117,7 @@ class VectorDBStrategy(ABC):
     if _description and _metadata:
       llm: BaseChatModel = kwargs["llm"]
       retriever = VectorDBStrategy._get_self_query_retriever(llm, self.get_loader(embeddings, storage_id), _description, _metadata)
-      return await retriever.ainvoke(query)
+      return await retriever.ainvoke(query, config={"source": kwargs.get("source", "retriever")})
     if search_type == "mixed":
       similarity_retriever = self.get_retriever(embeddings, storage_id, "similarity", search_kwargs)
       mmr_kwargs = {
@@ -128,7 +128,7 @@ class VectorDBStrategy(ABC):
       mmr_retriever = self.get_retriever(embeddings, storage_id, "mmr", mmr_kwargs)
       return await VectorDBStrategy._combine_search([similarity_retriever, mmr_retriever], query)
     retriever = self.get_retriever(embeddings, storage_id, search_type, search_kwargs)
-    return await retriever.ainvoke(query)
+    return await retriever.ainvoke(query, config={"source": kwargs.get("source", "retriever")})
 
   @staticmethod
   def _remove_duplicates(docs: List[Document]) -> List[Document]:
@@ -139,5 +139,5 @@ class VectorDBStrategy(ABC):
     retrievers: List[VectorStoreRetriever],
     query: str
   ) -> List[Document]:
-    tasks = [retriever.ainvoke(query) for retriever in retrievers]
+    tasks = [retriever.ainvoke(query, config={"source": "custom source"}) for retriever in retrievers]
     return VectorDBStrategy._remove_duplicates([doc for res in await asyncio.gather(*tasks) for doc in res])
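
Threading source through config looks unusual but appears deliberate: recent langchain-core versions fold unrecognized top-level config keys into the run's metadata, which is where NebulyHandler.on_retriever_start reads metadata.get("source"). A small probe of that assumed behavior:

    import asyncio
    from langchain_core.callbacks.base import AsyncCallbackHandler
    from langchain_core.runnables import RunnableLambda

    class Probe(AsyncCallbackHandler):
        async def on_chain_start(self, serialized, inputs, *, run_id, parent_run_id=None,
                                 tags=None, metadata=None, **kwargs):
            print(metadata)  # expected to include {'source': 'kb-123'}

    chain = RunnableLambda(lambda x: x)
    asyncio.run(chain.ainvoke("q", config={"source": "kb-123", "callbacks": [Probe()]}))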
{ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55/ws_bom_robot_app.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ws_bom_robot_app
-Version: 0.0.52
+Version: 0.0.55
 Summary: A FastAPI application serving ws bom/robot/llm platform ai.
 Home-page: https://github.com/websolutespa/bom
 Author: Websolute Spa
@@ -16,6 +16,7 @@ Requires-Dist: aiofiles==24.1.0
 Requires-Dist: pydantic==2.10.6
 Requires-Dist: pydantic-settings==2.7.1
 Requires-Dist: fastapi[standard]==0.115.8
+Requires-Dist: chevron==0.14.0
 Requires-Dist: langchain==0.3.18
 Requires-Dist: langchain-community==0.3.17
 Requires-Dist: langchain-core==0.3.34
@@ -47,7 +48,6 @@ Requires-Dist: unstructured-ingest[sharepoint]
 Requires-Dist: unstructured-ingest[slack]
 Requires-Dist: html5lib==1.1
 Requires-Dist: markdownify==0.14.1
-Requires-Dist: nebuly==0.3.38
 Dynamic: author
 Dynamic: author-email
 Dynamic: classifier
{ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app.egg-info/SOURCES.txt

@@ -16,12 +16,14 @@ ws_bom_robot_app.egg-info/dependency_links.txt
 ws_bom_robot_app.egg-info/requires.txt
 ws_bom_robot_app.egg-info/top_level.txt
 ws_bom_robot_app/llm/__init__.py
+ws_bom_robot_app/llm/agent_context.py
 ws_bom_robot_app/llm/agent_description.py
 ws_bom_robot_app/llm/agent_handler.py
 ws_bom_robot_app/llm/agent_lcel.py
 ws_bom_robot_app/llm/api.py
 ws_bom_robot_app/llm/defaut_prompt.py
 ws_bom_robot_app/llm/main.py
+ws_bom_robot_app/llm/nebuly_handler.py
 ws_bom_robot_app/llm/settings.py
 ws_bom_robot_app/llm/models/__init__.py
 ws_bom_robot_app/llm/models/api.py
{ws_bom_robot_app-0.0.52 → ws_bom_robot_app-0.0.55}/ws_bom_robot_app.egg-info/requires.txt

@@ -4,6 +4,7 @@ aiofiles==24.1.0
 pydantic==2.10.6
 pydantic-settings==2.7.1
 fastapi[standard]==0.115.8
+chevron==0.14.0
 langchain==0.3.18
 langchain-community==0.3.17
 langchain-core==0.3.34
@@ -35,4 +36,3 @@ unstructured-ingest[sharepoint]
 unstructured-ingest[slack]
 html5lib==1.1
 markdownify==0.14.1
-nebuly==0.3.38