ws-bom-robot-app 0.0.33__py3-none-any.whl → 0.0.35__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ws_bom_robot_app/config.py +10 -1
- ws_bom_robot_app/llm/agent_description.py +123 -124
- ws_bom_robot_app/llm/agent_handler.py +180 -167
- ws_bom_robot_app/llm/agent_lcel.py +54 -64
- ws_bom_robot_app/llm/api.py +33 -21
- ws_bom_robot_app/llm/defaut_prompt.py +15 -9
- ws_bom_robot_app/llm/main.py +109 -102
- ws_bom_robot_app/llm/models/api.py +55 -7
- ws_bom_robot_app/llm/models/kb.py +11 -2
- ws_bom_robot_app/llm/providers/__init__.py +0 -0
- ws_bom_robot_app/llm/providers/llm_manager.py +174 -0
- ws_bom_robot_app/llm/settings.py +4 -4
- ws_bom_robot_app/llm/tools/models/main.py +5 -3
- ws_bom_robot_app/llm/tools/tool_builder.py +23 -19
- ws_bom_robot_app/llm/tools/tool_manager.py +133 -101
- ws_bom_robot_app/llm/tools/utils.py +25 -25
- ws_bom_robot_app/llm/utils/agent_utils.py +17 -16
- ws_bom_robot_app/llm/utils/download.py +79 -79
- ws_bom_robot_app/llm/utils/print.py +29 -29
- ws_bom_robot_app/llm/utils/secrets.py +26 -0
- ws_bom_robot_app/llm/vector_store/generator.py +137 -137
- ws_bom_robot_app/llm/vector_store/integration/base.py +12 -1
- ws_bom_robot_app/llm/vector_store/loader/base.py +6 -5
- ws_bom_robot_app/llm/vector_store/loader/docling.py +27 -6
- ws_bom_robot_app/llm/vector_store/loader/json_loader.py +25 -25
- ws_bom_robot_app/main.py +7 -2
- {ws_bom_robot_app-0.0.33.dist-info → ws_bom_robot_app-0.0.35.dist-info}/METADATA +25 -12
- {ws_bom_robot_app-0.0.33.dist-info → ws_bom_robot_app-0.0.35.dist-info}/RECORD +30 -28
- ws_bom_robot_app/llm/utils/faiss_helper.py +0 -127
- {ws_bom_robot_app-0.0.33.dist-info → ws_bom_robot_app-0.0.35.dist-info}/WHEEL +0 -0
- {ws_bom_robot_app-0.0.33.dist-info → ws_bom_robot_app-0.0.35.dist-info}/top_level.txt +0 -0
ws_bom_robot_app/config.py
CHANGED
|
@@ -20,12 +20,21 @@ class Settings(BaseSettings):
|
|
|
20
20
|
robot_cms_auth: str = ''
|
|
21
21
|
robot_cms_db_folder: str = 'llmVectorDb'
|
|
22
22
|
robot_cms_kb_folder: str ='llmKbFile'
|
|
23
|
-
|
|
23
|
+
robot_debugger_llm_provider: str = 'openai'
|
|
24
|
+
robot_debugger_llm_model: str = 'gpt-4o'
|
|
25
|
+
robot_debugger_llm_key: str = ''
|
|
26
|
+
robot_debugger_embedding_key: str = ''
|
|
27
|
+
OPENAI_API_KEY: str = '' # used for dall-e api
|
|
28
|
+
GOOGLE_APPLICATION_CREDENTIALS: str = './.secrets/google.json' # path to google credentials iam file
|
|
24
29
|
model_config = ConfigDict(
|
|
25
30
|
env_file='./.env',
|
|
26
31
|
extra='ignore',
|
|
27
32
|
case_sensitive=False
|
|
28
33
|
)
|
|
34
|
+
def __init__(self, **kwargs):
|
|
35
|
+
super().__init__(**kwargs)
|
|
36
|
+
os.environ["OPENAI_API_KEY"] = self.OPENAI_API_KEY
|
|
37
|
+
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = self.GOOGLE_APPLICATION_CREDENTIALS
|
|
29
38
|
|
|
30
39
|
class RuntimeOptions(BaseModel):
|
|
31
40
|
@staticmethod
|
|
@@ -1,124 +1,123 @@
|
|
|
1
|
-
import json, requests, re
|
|
2
|
-
from typing import Any
|
|
3
|
-
from abc import ABC, abstractmethod
|
|
4
|
-
from
|
|
5
|
-
from langchain_core.
|
|
6
|
-
from langchain_core.
|
|
7
|
-
from langchain_core.runnables import
|
|
8
|
-
from
|
|
9
|
-
from
|
|
10
|
-
from ws_bom_robot_app.llm.
|
|
11
|
-
from ws_bom_robot_app.llm.utils.agent_utils import get_rules
|
|
12
|
-
|
|
13
|
-
# SafeDict helper class
|
|
14
|
-
class SafeDict(dict):
|
|
15
|
-
def __missing__(self, key):
|
|
16
|
-
return ''
|
|
17
|
-
|
|
18
|
-
# Strategy Interface
|
|
19
|
-
class AgentDescriptorStrategy(ABC):
|
|
20
|
-
@abstractmethod
|
|
21
|
-
def enrich_prompt(self, prompt: str, input: dict) -> str:
|
|
22
|
-
pass
|
|
23
|
-
|
|
24
|
-
@abstractmethod
|
|
25
|
-
def rule_input(self, input: dict) -> str:
|
|
26
|
-
pass
|
|
27
|
-
|
|
28
|
-
# Concrete Strategy for Default Agent
|
|
29
|
-
class DefaultAgentDescriptor(AgentDescriptorStrategy):
|
|
30
|
-
def enrich_prompt(self, prompt: str, input: dict) -> str:
|
|
31
|
-
# Default enrichment logic (could be minimal or no-op)
|
|
32
|
-
return prompt.format_map(SafeDict(input))
|
|
33
|
-
|
|
34
|
-
def rule_input(self, input: dict) -> str:
|
|
35
|
-
return input.get('content', "")
|
|
36
|
-
|
|
37
|
-
# Concrete Strategy for URL2Text Agent
|
|
38
|
-
class URL2TextAgentDescriptor(AgentDescriptorStrategy):
|
|
39
|
-
def enrich_prompt(self, prompt: str, input: dict) -> str:
|
|
40
|
-
input["context"] = self._get_page_text(input)
|
|
41
|
-
return prompt.format_map(SafeDict(input))
|
|
42
|
-
|
|
43
|
-
def rule_input(self, input: dict) -> str:
|
|
44
|
-
return input.get('context', "")
|
|
45
|
-
|
|
46
|
-
def _get_page_text(self, input: dict) -> str:
|
|
47
|
-
url = input.get("content", "")
|
|
48
|
-
exclusions = input.get("exclude", {})
|
|
49
|
-
response = requests.get(url)
|
|
50
|
-
response.raise_for_status()
|
|
51
|
-
soup = BeautifulSoup(response.content, 'html5lib')
|
|
52
|
-
classes_to_exclude = exclusions.get("classes", [])
|
|
53
|
-
ids_to_exclude = exclusions.get("ids", [])
|
|
54
|
-
for class_name in classes_to_exclude:
|
|
55
|
-
for element in soup.find_all(class_=class_name):
|
|
56
|
-
element.extract()
|
|
57
|
-
for id_name in ids_to_exclude:
|
|
58
|
-
for element in soup.find_all(id=id_name):
|
|
59
|
-
element.extract()
|
|
60
|
-
for script in soup(["script", "noscript", "style", "head", "footer", "iframe"]):
|
|
61
|
-
script.extract()
|
|
62
|
-
return re.sub(' +', ' ', soup.get_text())
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
class AgentDescriptor:
|
|
66
|
-
# Dictionary to hold all agent strategies
|
|
67
|
-
_list: dict[str,AgentDescriptorStrategy] = {
|
|
68
|
-
"default": DefaultAgentDescriptor(),
|
|
69
|
-
"url2text": URL2TextAgentDescriptor(),
|
|
70
|
-
}
|
|
71
|
-
|
|
72
|
-
# Functions to manage strategies
|
|
73
|
-
@staticmethod
|
|
74
|
-
def add_strategy(name: str, strategy: AgentDescriptorStrategy):
|
|
75
|
-
"""_summary_
|
|
76
|
-
add a new strategy to the dictionary
|
|
77
|
-
Args:
|
|
78
|
-
name (str): name of the strategy, in lowercase
|
|
79
|
-
strategy (AgentDescriptorStrategy): class implementing the strategy
|
|
80
|
-
Examples:
|
|
81
|
-
AgentDescriptor.add_strategy("custom_agent_descriptor", CustomAgentDescriptor())
|
|
82
|
-
"""
|
|
83
|
-
AgentDescriptor._list[name.lower()] = strategy
|
|
84
|
-
|
|
85
|
-
@staticmethod
|
|
86
|
-
def get_strategy(name: str) -> AgentDescriptorStrategy:
|
|
87
|
-
return AgentDescriptor._list.get(name.lower(), DefaultAgentDescriptor())
|
|
88
|
-
|
|
89
|
-
def __init__(self,
|
|
90
|
-
self.__prompt = prompt
|
|
91
|
-
self.__llm =
|
|
92
|
-
self.
|
|
93
|
-
self.
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
("
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
response
|
|
124
|
-
return response
|
|
1
|
+
import json, requests, re
|
|
2
|
+
from typing import Any
|
|
3
|
+
from abc import ABC, abstractmethod
|
|
4
|
+
from langchain_core.prompts import ChatPromptTemplate
|
|
5
|
+
from langchain_core.messages import AIMessage
|
|
6
|
+
from langchain_core.runnables import RunnableSerializable
|
|
7
|
+
from langchain_core.runnables import RunnableLambda
|
|
8
|
+
from bs4 import BeautifulSoup
|
|
9
|
+
from ws_bom_robot_app.llm.models.api import LlmRules
|
|
10
|
+
from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
|
|
11
|
+
from ws_bom_robot_app.llm.utils.agent_utils import get_rules
|
|
12
|
+
|
|
13
|
+
# SafeDict helper class: a dict whose missing keys resolve to ''.
class SafeDict(dict):
    """Mapping for use with ``str.format_map`` that substitutes the empty
    string for any placeholder not present, instead of raising KeyError."""

    def __missing__(self, key):
        # dict lookup calls this hook only when `key` is absent.
        return ''
|
|
17
|
+
|
|
18
|
+
# Strategy Interface
class AgentDescriptorStrategy(ABC):
    """Contract for per-mode agent behaviors used by AgentDescriptor.

    A strategy decides how the raw request payload is folded into the
    system prompt, and which part of the payload is matched against the
    configured rules.
    """

    @abstractmethod
    def enrich_prompt(self, prompt: str, input: dict) -> str:
        """Return `prompt` with values from `input` substituted/augmented."""
        pass

    @abstractmethod
    def rule_input(self, input: dict) -> str:
        """Return the text from `input` that rule retrieval should match on."""
        pass
|
|
27
|
+
|
|
28
|
+
# Concrete Strategy for Default Agent
class DefaultAgentDescriptor(AgentDescriptorStrategy):
    """Fallback strategy: plain placeholder substitution, no context fetch."""

    def enrich_prompt(self, prompt: str, input: dict) -> str:
        """Fill `prompt` placeholders from `input`; unknown keys become ''."""
        safe_values = SafeDict(input)
        return prompt.format_map(safe_values)

    def rule_input(self, input: dict) -> str:
        """Rules are matched against the raw user content field."""
        return input.get('content', "")
|
|
36
|
+
|
|
37
|
+
# Concrete Strategy for URL2Text Agent
class URL2TextAgentDescriptor(AgentDescriptorStrategy):
    """Strategy that fetches the page at input['content'], strips boilerplate
    markup, and injects the visible text into the prompt as `context`."""

    def enrich_prompt(self, prompt: str, input: dict) -> str:
        """Download the page and substitute it (plus other input values)
        into `prompt`; unknown placeholders collapse to ''."""
        input["context"] = self._get_page_text(input)
        return prompt.format_map(SafeDict(input))

    def rule_input(self, input: dict) -> str:
        # Rules are matched against the scraped page text, not the URL.
        return input.get('context', "")

    def _get_page_text(self, input: dict) -> str:
        """Fetch input['content'] (a URL) and return its de-noised text.

        input['exclude'] may carry {'classes': [...], 'ids': [...]} of
        elements to drop before extraction.

        Raises:
            requests.HTTPError: on a non-2xx response (raise_for_status).
            requests.Timeout: if the server does not respond in time.
        """
        url = input.get("content", "")
        exclusions = input.get("exclude", {})
        # FIX: the original call had no timeout, so a stalled server could
        # block the agent indefinitely; requests never times out by default.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html5lib')
        classes_to_exclude = exclusions.get("classes", [])
        ids_to_exclude = exclusions.get("ids", [])
        # Drop caller-specified elements first, then generic non-content tags.
        for class_name in classes_to_exclude:
            for element in soup.find_all(class_=class_name):
                element.extract()
        for id_name in ids_to_exclude:
            for element in soup.find_all(id=id_name):
                element.extract()
        for script in soup(["script", "noscript", "style", "head", "footer", "iframe"]):
            script.extract()
        # Collapse runs of spaces left behind by removed elements.
        return re.sub(' +', ' ', soup.get_text())
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
class AgentDescriptor:
    """One-shot descriptor agent: a pluggable strategy builds the system
    prompt from a JSON payload, optional rules are retrieved and appended,
    and the resulting prompt chain is invoked on the configured LLM."""

    # Dictionary to hold all agent strategies
    _list: dict[str, AgentDescriptorStrategy] = {
        "default": DefaultAgentDescriptor(),
        "url2text": URL2TextAgentDescriptor(),
    }

    # Functions to manage strategies
    @staticmethod
    def add_strategy(name: str, strategy: AgentDescriptorStrategy):
        """add a new strategy to the dictionary
        Args:
            name (str): name of the strategy, in lowercase
            strategy (AgentDescriptorStrategy): class implementing the strategy
        Examples:
            AgentDescriptor.add_strategy("custom_agent_descriptor", CustomAgentDescriptor())
        """
        AgentDescriptor._list[name.lower()] = strategy

    @staticmethod
    def get_strategy(name: str) -> AgentDescriptorStrategy:
        """Return the strategy registered under `name`, else a default one."""
        return AgentDescriptor._list.get(name.lower(), DefaultAgentDescriptor())

    def __init__(self, llm: LlmInterface, prompt: str, mode: str, rules: LlmRules = None):
        self.__prompt = prompt
        # FIX: the original read `self.__llm = llm,` — the trailing comma
        # stored a 1-tuple, so every later self.__llm.get_embeddings() /
        # self.__llm.get_llm() call raised AttributeError on a tuple.
        self.__llm = llm
        self.rules = rules
        self.strategy = self.get_strategy(mode)  # Selects the strategy from the dictionary

    async def __create_prompt(self, input_dict: dict):
        """Assemble the ChatPromptTemplate for one invocation.

        `input_dict["input"]` is a JSON string; after parsing, the strategy
        enriches the system prompt and, when rules are configured, the
        retrieved rules text is appended to it.
        """
        # FIX: the fallback default was a dict ({}), which json.loads cannot
        # parse; use the JSON string "{}" so a missing key yields an empty
        # payload instead of a TypeError.
        input_data = json.loads(input_dict.get("input", "{}"))
        system = self.strategy.enrich_prompt(self.__prompt, input_data)
        if self.rules:
            rule_input = self.strategy.rule_input(input_data)
            rules_prompt = await get_rules(self.__llm.get_embeddings(), self.rules, rule_input)
            system += rules_prompt
        return ChatPromptTemplate.from_messages(
            [
                ("system", system),
                ("user", input_data.get("content", ""))
            ]
        )

    def __create_agent_descriptor(self, content) -> RunnableSerializable[Any, Any]:
        # json.loads here only validates that `content` is well-formed JSON
        # (raising early if not); the parsed value is intentionally unused —
        # __create_prompt re-parses the raw string passed to ainvoke.
        content = json.loads(content)
        agent = (
            {
                "input": lambda x: x["input"],
            }
            | RunnableLambda(self.__create_prompt)
            | self.__llm.get_llm()
        )
        return agent

    async def run_agent(self, content) -> Any:
        """Run the descriptor chain on `content` (a JSON payload string)
        and return the model's AIMessage response."""
        agent_descriptor = self.__create_agent_descriptor(content)
        response: AIMessage = await agent_descriptor.ainvoke({"input": content})
        return response
|
|
@@ -1,167 +1,180 @@
|
|
|
1
|
-
from asyncio import Queue
|
|
2
|
-
from langchain_core.agents import AgentFinish
|
|
3
|
-
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
|
|
4
|
-
from langchain.callbacks.base import AsyncCallbackHandler
|
|
5
|
-
from ws_bom_robot_app.llm.utils.print import printJson, printString
|
|
6
|
-
from typing import Any, Dict, List, Optional, Union
|
|
7
|
-
from uuid import UUID
|
|
8
|
-
import ws_bom_robot_app.llm.settings as settings
|
|
9
|
-
from langchain_core.callbacks.base import AsyncCallbackHandler
|
|
10
|
-
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
|
|
11
|
-
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
|
|
12
|
-
import json
|
|
13
|
-
|
|
14
|
-
# Here is a custom handler that will print the tokens to stdout.
|
|
15
|
-
# Instead of printing to stdout you can send the data elsewhere; e.g., to a streaming API response
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
) -> None:
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
run_id: UUID,
|
|
158
|
-
parent_run_id: UUID = None,
|
|
159
|
-
tags: List[str] = None,
|
|
160
|
-
**kwargs: Any,
|
|
161
|
-
) -> None:
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
1
|
+
from asyncio import Queue
|
|
2
|
+
from langchain_core.agents import AgentFinish
|
|
3
|
+
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
|
|
4
|
+
from langchain.callbacks.base import AsyncCallbackHandler
|
|
5
|
+
from ws_bom_robot_app.llm.utils.print import printJson, printString
|
|
6
|
+
from typing import Any, Dict, List, Optional, Union
|
|
7
|
+
from uuid import UUID
|
|
8
|
+
import ws_bom_robot_app.llm.settings as settings
|
|
9
|
+
from langchain_core.callbacks.base import AsyncCallbackHandler
|
|
10
|
+
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
|
|
11
|
+
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
|
|
12
|
+
import json
|
|
13
|
+
|
|
14
|
+
# Here is a custom handler that will print the tokens to stdout.
|
|
15
|
+
# Instead of printing to stdout you can send the data elsewhere; e.g., to a streaming API response
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _parse_token(llm:str,token: str) -> str:
|
|
19
|
+
"""Parses the token based on the LLM provider."""
|
|
20
|
+
if llm == "anthropic" and isinstance(token, list):
|
|
21
|
+
first = token[0]
|
|
22
|
+
if 'text' in first:
|
|
23
|
+
token = first['text']
|
|
24
|
+
else:
|
|
25
|
+
#[{'id': 'toolu_01GGLwJcrQ8PvFMUkQPGu8n7', 'input': {}, 'name': 'document_retriever_xxx', 'type': 'tool_use', 'index': 1}]
|
|
26
|
+
token = ""
|
|
27
|
+
return token
|
|
28
|
+
|
|
29
|
+
class AgentHandler(AsyncCallbackHandler):
    """Streaming callback handler that forwards LLM tokens to an asyncio
    Queue, detecting ```json fenced blocks in the stream and emitting them
    as parsed JSON payloads instead of raw text."""

    def __init__(self, queue: Queue, llm: str, threadId: str = None) -> None:
        # queue: consumer-facing queue; None is later pushed as end sentinel.
        # llm: provider name, passed to _parse_token to normalize chunks.
        # threadId: opaque conversation id echoed in the first "info" chunk.
        super().__init__()
        self._threadId = threadId
        self.json_block = ""        # accumulator for an in-progress ```json block
        self.is_json_block = False  # True while inside a fenced block
        self.backtick_count = 0     # Backtick tally for accurate fence detection
        self.queue = queue
        self.llm = llm

    async def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        *,
        run_id: UUID,
        parent_run_id: UUID = None,
        tags: List[str] = None,
        metadata: Dict[str, Any] = None,
        **kwargs: Any,
    ) -> None:
        # Announce the stream with an "info" chunk carrying the thread id.
        # NOTE(review): firstChunk is a dict passed to printString, while
        # finalChunk below goes through printJson — confirm this asymmetry
        # is intentional.
        firstChunk = {
            "type": "info",
            "threadId": self._threadId,
        }
        await self.queue.put(printString(firstChunk))

    """async def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], *, run_id: UUID = None, parent_run_id = None, tags = None, metadata = None, **kwargs: Any) -> Any:
        pass"""

    async def on_tool_end(self, output: Any, *, run_id: UUID, parent_run_id: UUID = None, tags: List[str] = None, **kwargs: Any) -> None:
        pass

    async def on_llm_new_token(
        self,
        token: str,
        *,
        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        tags: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> None:
        """Handle new tokens during streaming.

        Tokens are normalized per provider, then routed: text outside a
        ```json fence goes straight to the queue, text inside a fence is
        buffered until the closing fence and then parsed/emitted as JSON.
        """

        if token:
            token = _parse_token(self.llm, token)
            if token:
                self.backtick_count += token.count("`")

                # Three accumulated backticks mark a fence boundary.
                if self.backtick_count >= 3:
                    if not self.is_json_block:
                        # Opening fence: start buffering a JSON block.
                        self.is_json_block = True
                        self.json_block = ""
                    else:
                        # Closing fence: flush the buffered block as JSON.
                        self.is_json_block = False
                        self.json_block += token.replace("```json", '')
                        await self.process_json_block(self.json_block)
                        self.json_block = ""
                    # NOTE(review): placement of this reset (after both
                    # branches vs. inside the closing branch only) was
                    # inferred from a flattened diff — confirm against the
                    # original indentation.
                    self.backtick_count = 0
                elif self.is_json_block:
                    self.json_block += token
                else:
                    await self.queue.put(printString(token))

    async def on_agent_finish(
        self,
        finish: AgentFinish,
        *,
        run_id: UUID,
        parent_run_id: UUID = None,
        tags: List[str] = None,
        **kwargs: Any,
    ) -> None:
        # Record the final answer in the shared chat history, then emit the
        # terminating "end" chunk and the None sentinel for queue consumers.
        settings.chat_history.extend(
            [
                AIMessage(content=_parse_token(self.llm, finish.return_values["output"])),
            ]
        )
        finalChunk = {"type": "end"}
        await self.queue.put(printJson(finalChunk))
        await self.queue.put(None)

    async def process_json_block(self, json_block: str):
        """Process a complete buffered JSON block."""
        # Strip the fence delimiters and surrounding whitespace.
        # NOTE(review): .replace('json', '') also removes the substring
        # 'json' anywhere inside the payload (keys/values included) —
        # confirm this cannot corrupt legitimate content.
        json_block_clean = json_block.replace('```', '').replace('json', '').strip()
        # Skip empty blocks before attempting to parse.
        if json_block_clean:
            try:
                # Attempt to parse the JSON and forward it downstream.
                parsed_json = json.loads(json_block_clean)
                await self.queue.put(printJson(parsed_json))
            except json.JSONDecodeError as e:
                # Malformed JSON is re-raised (not merely logged) —
                # this aborts the stream for the caller to handle.
                raise e
|
126
|
+
|
|
127
|
+
class RawAgentHandler(AsyncCallbackHandler):
    """Pass-through streaming handler: forwards every normalized token
    verbatim to the queue, records the final answer in chat history, and
    closes the stream with a None sentinel. No JSON-fence post-processing."""

    def __init__(self, queue: Queue, llm: str) -> None:
        super().__init__()
        self.llm = llm
        self.queue = queue

    async def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        *,
        run_id: UUID,
        parent_run_id: UUID = None,
        tags: List[str] = None,
        metadata: Dict[str, Any] = None,
        **kwargs: Any,
    ) -> None:
        # Nothing to announce for the raw stream.
        pass

    """async def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], *, run_id: UUID = None, parent_run_id = None, tags = None, metadata = None, **kwargs: Any) -> Any:
        pass"""

    async def on_tool_end(self, output: Any, *, run_id: UUID, parent_run_id: UUID = None, tags: List[str] = None, **kwargs: Any) -> None:
        pass

    async def on_llm_new_token(
        self,
        token: str,
        *,
        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        tags: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> None:
        """Handles new tokens during streaming."""
        if not token:  # Only process non-empty tokens
            return
        await self.queue.put(_parse_token(self.llm, token))

    async def on_agent_finish(
        self,
        finish: AgentFinish,
        *,
        run_id: UUID,
        parent_run_id: UUID = None,
        tags: List[str] = None,
        **kwargs: Any,
    ) -> None:
        # Persist the final output, then signal end-of-stream to consumers.
        final_message = AIMessage(content=_parse_token(self.llm, finish.return_values["output"]))
        settings.chat_history.extend([final_message])
        await self.queue.put(None)
|