lfx-nightly 0.1.13.dev0__py3-none-any.whl → 0.2.0.dev26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lfx/_assets/component_index.json +1 -1
- lfx/base/agents/agent.py +121 -29
- lfx/base/agents/altk_base_agent.py +380 -0
- lfx/base/agents/altk_tool_wrappers.py +565 -0
- lfx/base/agents/events.py +103 -35
- lfx/base/agents/utils.py +15 -2
- lfx/base/composio/composio_base.py +183 -233
- lfx/base/data/base_file.py +88 -21
- lfx/base/data/storage_utils.py +192 -0
- lfx/base/data/utils.py +178 -14
- lfx/base/datastax/__init__.py +5 -0
- lfx/{components/vectorstores/astradb.py → base/datastax/astradb_base.py} +84 -473
- lfx/base/embeddings/embeddings_class.py +113 -0
- lfx/base/io/chat.py +5 -4
- lfx/base/mcp/util.py +101 -15
- lfx/base/models/groq_constants.py +74 -58
- lfx/base/models/groq_model_discovery.py +265 -0
- lfx/base/models/model.py +1 -1
- lfx/base/models/model_input_constants.py +74 -7
- lfx/base/models/model_utils.py +100 -0
- lfx/base/models/ollama_constants.py +3 -0
- lfx/base/models/openai_constants.py +7 -0
- lfx/base/models/watsonx_constants.py +36 -0
- lfx/base/tools/run_flow.py +601 -129
- lfx/cli/commands.py +7 -4
- lfx/cli/common.py +2 -2
- lfx/cli/run.py +1 -1
- lfx/cli/script_loader.py +53 -11
- lfx/components/Notion/create_page.py +1 -1
- lfx/components/Notion/list_database_properties.py +1 -1
- lfx/components/Notion/list_pages.py +1 -1
- lfx/components/Notion/list_users.py +1 -1
- lfx/components/Notion/page_content_viewer.py +1 -1
- lfx/components/Notion/search.py +1 -1
- lfx/components/Notion/update_page_property.py +1 -1
- lfx/components/__init__.py +19 -5
- lfx/components/altk/__init__.py +34 -0
- lfx/components/altk/altk_agent.py +193 -0
- lfx/components/amazon/amazon_bedrock_converse.py +1 -1
- lfx/components/apify/apify_actor.py +4 -4
- lfx/components/composio/__init__.py +70 -18
- lfx/components/composio/apollo_composio.py +11 -0
- lfx/components/composio/bitbucket_composio.py +11 -0
- lfx/components/composio/canva_composio.py +11 -0
- lfx/components/composio/coda_composio.py +11 -0
- lfx/components/composio/composio_api.py +10 -0
- lfx/components/composio/discord_composio.py +1 -1
- lfx/components/composio/elevenlabs_composio.py +11 -0
- lfx/components/composio/exa_composio.py +11 -0
- lfx/components/composio/firecrawl_composio.py +11 -0
- lfx/components/composio/fireflies_composio.py +11 -0
- lfx/components/composio/gmail_composio.py +1 -1
- lfx/components/composio/googlebigquery_composio.py +11 -0
- lfx/components/composio/googlecalendar_composio.py +1 -1
- lfx/components/composio/googledocs_composio.py +1 -1
- lfx/components/composio/googlemeet_composio.py +1 -1
- lfx/components/composio/googlesheets_composio.py +1 -1
- lfx/components/composio/googletasks_composio.py +1 -1
- lfx/components/composio/heygen_composio.py +11 -0
- lfx/components/composio/mem0_composio.py +11 -0
- lfx/components/composio/peopledatalabs_composio.py +11 -0
- lfx/components/composio/perplexityai_composio.py +11 -0
- lfx/components/composio/serpapi_composio.py +11 -0
- lfx/components/composio/slack_composio.py +3 -574
- lfx/components/composio/slackbot_composio.py +1 -1
- lfx/components/composio/snowflake_composio.py +11 -0
- lfx/components/composio/tavily_composio.py +11 -0
- lfx/components/composio/youtube_composio.py +2 -2
- lfx/components/{agents → cuga}/__init__.py +5 -7
- lfx/components/cuga/cuga_agent.py +730 -0
- lfx/components/data/__init__.py +78 -28
- lfx/components/data_source/__init__.py +58 -0
- lfx/components/{data → data_source}/api_request.py +26 -3
- lfx/components/{data → data_source}/csv_to_data.py +15 -10
- lfx/components/{data → data_source}/json_to_data.py +15 -8
- lfx/components/{data → data_source}/news_search.py +1 -1
- lfx/components/{data → data_source}/rss.py +1 -1
- lfx/components/{data → data_source}/sql_executor.py +1 -1
- lfx/components/{data → data_source}/url.py +1 -1
- lfx/components/{data → data_source}/web_search.py +1 -1
- lfx/components/datastax/__init__.py +12 -6
- lfx/components/datastax/{astra_assistant_manager.py → astradb_assistant_manager.py} +1 -0
- lfx/components/datastax/astradb_chatmemory.py +40 -0
- lfx/components/datastax/astradb_cql.py +6 -32
- lfx/components/datastax/astradb_graph.py +10 -124
- lfx/components/datastax/astradb_tool.py +13 -53
- lfx/components/datastax/astradb_vectorstore.py +134 -977
- lfx/components/datastax/create_assistant.py +1 -0
- lfx/components/datastax/create_thread.py +1 -0
- lfx/components/datastax/dotenv.py +1 -0
- lfx/components/datastax/get_assistant.py +1 -0
- lfx/components/datastax/getenvvar.py +1 -0
- lfx/components/datastax/graph_rag.py +1 -1
- lfx/components/datastax/hcd.py +1 -1
- lfx/components/datastax/list_assistants.py +1 -0
- lfx/components/datastax/run.py +1 -0
- lfx/components/deactivated/json_document_builder.py +1 -1
- lfx/components/elastic/elasticsearch.py +1 -1
- lfx/components/elastic/opensearch_multimodal.py +1575 -0
- lfx/components/files_and_knowledge/__init__.py +47 -0
- lfx/components/{data → files_and_knowledge}/directory.py +1 -1
- lfx/components/{data → files_and_knowledge}/file.py +246 -18
- lfx/components/{knowledge_bases → files_and_knowledge}/ingestion.py +17 -9
- lfx/components/{knowledge_bases → files_and_knowledge}/retrieval.py +18 -10
- lfx/components/{data → files_and_knowledge}/save_file.py +142 -22
- lfx/components/flow_controls/__init__.py +58 -0
- lfx/components/{logic → flow_controls}/conditional_router.py +1 -1
- lfx/components/{logic → flow_controls}/loop.py +47 -9
- lfx/components/flow_controls/run_flow.py +108 -0
- lfx/components/glean/glean_search_api.py +1 -1
- lfx/components/groq/groq.py +35 -28
- lfx/components/helpers/__init__.py +102 -0
- lfx/components/ibm/watsonx.py +25 -21
- lfx/components/input_output/__init__.py +3 -1
- lfx/components/input_output/chat.py +12 -3
- lfx/components/input_output/chat_output.py +12 -4
- lfx/components/input_output/text.py +1 -1
- lfx/components/input_output/text_output.py +1 -1
- lfx/components/{data → input_output}/webhook.py +1 -1
- lfx/components/knowledge_bases/__init__.py +59 -4
- lfx/components/langchain_utilities/character.py +1 -1
- lfx/components/langchain_utilities/csv_agent.py +84 -16
- lfx/components/langchain_utilities/json_agent.py +67 -12
- lfx/components/langchain_utilities/language_recursive.py +1 -1
- lfx/components/llm_operations/__init__.py +46 -0
- lfx/components/{processing → llm_operations}/batch_run.py +1 -1
- lfx/components/{processing → llm_operations}/lambda_filter.py +1 -1
- lfx/components/{logic → llm_operations}/llm_conditional_router.py +1 -1
- lfx/components/{processing/llm_router.py → llm_operations/llm_selector.py} +3 -3
- lfx/components/{processing → llm_operations}/structured_output.py +56 -18
- lfx/components/logic/__init__.py +126 -0
- lfx/components/mem0/mem0_chat_memory.py +11 -0
- lfx/components/mistral/mistral_embeddings.py +1 -1
- lfx/components/models/__init__.py +64 -9
- lfx/components/models_and_agents/__init__.py +49 -0
- lfx/components/{agents → models_and_agents}/agent.py +49 -6
- lfx/components/models_and_agents/embedding_model.py +423 -0
- lfx/components/models_and_agents/language_model.py +398 -0
- lfx/components/{agents → models_and_agents}/mcp_component.py +84 -45
- lfx/components/{helpers → models_and_agents}/memory.py +1 -1
- lfx/components/nvidia/system_assist.py +1 -1
- lfx/components/olivya/olivya.py +1 -1
- lfx/components/ollama/ollama.py +235 -14
- lfx/components/openrouter/openrouter.py +49 -147
- lfx/components/processing/__init__.py +9 -57
- lfx/components/processing/converter.py +1 -1
- lfx/components/processing/dataframe_operations.py +1 -1
- lfx/components/processing/parse_json_data.py +2 -2
- lfx/components/processing/parser.py +7 -2
- lfx/components/processing/split_text.py +1 -1
- lfx/components/qdrant/qdrant.py +1 -1
- lfx/components/redis/redis.py +1 -1
- lfx/components/twelvelabs/split_video.py +10 -0
- lfx/components/twelvelabs/video_file.py +12 -0
- lfx/components/utilities/__init__.py +43 -0
- lfx/components/{helpers → utilities}/calculator_core.py +1 -1
- lfx/components/{helpers → utilities}/current_date.py +1 -1
- lfx/components/{processing → utilities}/python_repl_core.py +1 -1
- lfx/components/vectorstores/__init__.py +0 -6
- lfx/components/vectorstores/local_db.py +9 -0
- lfx/components/youtube/youtube_transcripts.py +118 -30
- lfx/custom/custom_component/component.py +60 -3
- lfx/custom/custom_component/custom_component.py +68 -6
- lfx/field_typing/constants.py +1 -0
- lfx/graph/edge/base.py +45 -22
- lfx/graph/graph/base.py +5 -2
- lfx/graph/graph/schema.py +3 -2
- lfx/graph/state/model.py +15 -2
- lfx/graph/utils.py +6 -0
- lfx/graph/vertex/base.py +4 -1
- lfx/graph/vertex/param_handler.py +10 -7
- lfx/graph/vertex/vertex_types.py +1 -1
- lfx/helpers/__init__.py +12 -0
- lfx/helpers/flow.py +117 -0
- lfx/inputs/input_mixin.py +24 -1
- lfx/inputs/inputs.py +13 -1
- lfx/interface/components.py +161 -83
- lfx/io/schema.py +6 -0
- lfx/log/logger.py +5 -3
- lfx/schema/schema.py +5 -0
- lfx/services/database/__init__.py +5 -0
- lfx/services/database/service.py +25 -0
- lfx/services/deps.py +87 -22
- lfx/services/manager.py +19 -6
- lfx/services/mcp_composer/service.py +998 -157
- lfx/services/session.py +5 -0
- lfx/services/settings/base.py +51 -7
- lfx/services/settings/constants.py +8 -0
- lfx/services/storage/local.py +76 -46
- lfx/services/storage/service.py +152 -29
- lfx/template/field/base.py +3 -0
- lfx/utils/ssrf_protection.py +384 -0
- lfx/utils/validate_cloud.py +26 -0
- {lfx_nightly-0.1.13.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/METADATA +38 -22
- {lfx_nightly-0.1.13.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/RECORD +210 -196
- {lfx_nightly-0.1.13.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/WHEEL +1 -1
- lfx/components/agents/cuga_agent.py +0 -1013
- lfx/components/datastax/astra_db.py +0 -77
- lfx/components/datastax/cassandra.py +0 -92
- lfx/components/logic/run_flow.py +0 -71
- lfx/components/models/embedding_model.py +0 -114
- lfx/components/models/language_model.py +0 -144
- lfx/components/vectorstores/astradb_graph.py +0 -326
- lfx/components/vectorstores/cassandra.py +0 -264
- lfx/components/vectorstores/cassandra_graph.py +0 -238
- lfx/components/vectorstores/chroma.py +0 -167
- lfx/components/vectorstores/clickhouse.py +0 -135
- lfx/components/vectorstores/couchbase.py +0 -102
- lfx/components/vectorstores/elasticsearch.py +0 -267
- lfx/components/vectorstores/faiss.py +0 -111
- lfx/components/vectorstores/graph_rag.py +0 -141
- lfx/components/vectorstores/hcd.py +0 -314
- lfx/components/vectorstores/milvus.py +0 -115
- lfx/components/vectorstores/mongodb_atlas.py +0 -213
- lfx/components/vectorstores/opensearch.py +0 -243
- lfx/components/vectorstores/pgvector.py +0 -72
- lfx/components/vectorstores/pinecone.py +0 -134
- lfx/components/vectorstores/qdrant.py +0 -109
- lfx/components/vectorstores/supabase.py +0 -76
- lfx/components/vectorstores/upstash.py +0 -124
- lfx/components/vectorstores/vectara.py +0 -97
- lfx/components/vectorstores/vectara_rag.py +0 -164
- lfx/components/vectorstores/weaviate.py +0 -89
- /lfx/components/{data → data_source}/mock_data.py +0 -0
- /lfx/components/datastax/{astra_vectorize.py → astradb_vectorize.py} +0 -0
- /lfx/components/{logic → flow_controls}/data_conditional_router.py +0 -0
- /lfx/components/{logic → flow_controls}/flow_tool.py +0 -0
- /lfx/components/{logic → flow_controls}/listen.py +0 -0
- /lfx/components/{logic → flow_controls}/notify.py +0 -0
- /lfx/components/{logic → flow_controls}/pass_message.py +0 -0
- /lfx/components/{logic → flow_controls}/sub_flow.py +0 -0
- /lfx/components/{processing → models_and_agents}/prompt.py +0 -0
- /lfx/components/{helpers → processing}/create_list.py +0 -0
- /lfx/components/{helpers → processing}/output_parser.py +0 -0
- /lfx/components/{helpers → processing}/store_message.py +0 -0
- /lfx/components/{helpers → utilities}/id_generator.py +0 -0
- {lfx_nightly-0.1.13.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/entry_points.txt +0 -0
lfx/base/agents/agent.py
CHANGED
|
@@ -5,12 +5,13 @@ from typing import TYPE_CHECKING, cast
|
|
|
5
5
|
|
|
6
6
|
from langchain.agents import AgentExecutor, BaseMultiActionAgent, BaseSingleActionAgent
|
|
7
7
|
from langchain.agents.agent import RunnableAgent
|
|
8
|
-
from
|
|
8
|
+
from langchain.callbacks.base import BaseCallbackHandler
|
|
9
|
+
from langchain_core.messages import BaseMessage, HumanMessage
|
|
9
10
|
from langchain_core.runnables import Runnable
|
|
10
11
|
|
|
11
12
|
from lfx.base.agents.callback import AgentAsyncHandler
|
|
12
13
|
from lfx.base.agents.events import ExceptionWithMessageError, process_agent_events
|
|
13
|
-
from lfx.base.agents.utils import
|
|
14
|
+
from lfx.base.agents.utils import get_chat_output_sender_name
|
|
14
15
|
from lfx.custom.custom_component.component import Component, _get_component_toolkit
|
|
15
16
|
from lfx.field_typing import Tool
|
|
16
17
|
from lfx.inputs.inputs import InputTypes, MultilineInput
|
|
@@ -19,14 +20,13 @@ from lfx.log.logger import logger
|
|
|
19
20
|
from lfx.memory import delete_message
|
|
20
21
|
from lfx.schema.content_block import ContentBlock
|
|
21
22
|
from lfx.schema.data import Data
|
|
23
|
+
from lfx.schema.log import OnTokenFunctionType
|
|
22
24
|
from lfx.schema.message import Message
|
|
23
25
|
from lfx.template.field.base import Output
|
|
24
26
|
from lfx.utils.constants import MESSAGE_SENDER_AI
|
|
25
27
|
|
|
26
28
|
if TYPE_CHECKING:
|
|
27
|
-
from
|
|
28
|
-
|
|
29
|
-
from lfx.schema.log import SendMessageFunctionType
|
|
29
|
+
from lfx.schema.log import OnTokenFunctionType, SendMessageFunctionType
|
|
30
30
|
|
|
31
31
|
|
|
32
32
|
DEFAULT_TOOLS_DESCRIPTION = "A helpful assistant with access to the following tools:"
|
|
@@ -75,6 +75,12 @@ class LCAgentComponent(Component):
|
|
|
75
75
|
Output(display_name="Response", name="response", method="message_response"),
|
|
76
76
|
]
|
|
77
77
|
|
|
78
|
+
# Get shared callbacks for tracing and save them to self.shared_callbacks
|
|
79
|
+
def _get_shared_callbacks(self) -> list[BaseCallbackHandler]:
|
|
80
|
+
if not hasattr(self, "shared_callbacks"):
|
|
81
|
+
self.shared_callbacks = self.get_langchain_callbacks()
|
|
82
|
+
return self.shared_callbacks
|
|
83
|
+
|
|
78
84
|
@abstractmethod
|
|
79
85
|
def build_agent(self) -> AgentExecutor:
|
|
80
86
|
"""Create the agent."""
|
|
@@ -119,6 +125,24 @@ class LCAgentComponent(Component):
|
|
|
119
125
|
# might be overridden in subclasses
|
|
120
126
|
return None
|
|
121
127
|
|
|
128
|
+
def _data_to_messages_skip_empty(self, data: list[Data]) -> list[BaseMessage]:
|
|
129
|
+
"""Convert data to messages, filtering only empty text while preserving non-text content.
|
|
130
|
+
|
|
131
|
+
Note: added to fix issue with certain providers failing when given empty text as input.
|
|
132
|
+
"""
|
|
133
|
+
messages = []
|
|
134
|
+
for value in data:
|
|
135
|
+
# Only skip if the message has a text attribute that is empty/whitespace
|
|
136
|
+
text = getattr(value, "text", None)
|
|
137
|
+
if isinstance(text, str) and not text.strip():
|
|
138
|
+
# Skip only messages with empty/whitespace-only text strings
|
|
139
|
+
continue
|
|
140
|
+
|
|
141
|
+
lc_message = value.to_lc_message()
|
|
142
|
+
messages.append(lc_message)
|
|
143
|
+
|
|
144
|
+
return messages
|
|
145
|
+
|
|
122
146
|
async def run_agent(
|
|
123
147
|
self,
|
|
124
148
|
agent: Runnable | BaseSingleActionAgent | BaseMultiActionAgent | AgentExecutor,
|
|
@@ -138,41 +162,76 @@ class LCAgentComponent(Component):
|
|
|
138
162
|
max_iterations=max_iterations,
|
|
139
163
|
)
|
|
140
164
|
# Convert input_value to proper format for agent
|
|
141
|
-
|
|
165
|
+
lc_message = None
|
|
166
|
+
if isinstance(self.input_value, Message):
|
|
142
167
|
lc_message = self.input_value.to_lc_message()
|
|
143
|
-
|
|
168
|
+
# Extract text content from the LangChain message for agent input
|
|
169
|
+
# Agents expect a string input, not a Message object
|
|
170
|
+
if hasattr(lc_message, "content"):
|
|
171
|
+
if isinstance(lc_message.content, str):
|
|
172
|
+
input_dict: dict[str, str | list[BaseMessage] | BaseMessage] = {"input": lc_message.content}
|
|
173
|
+
elif isinstance(lc_message.content, list):
|
|
174
|
+
# For multimodal content, extract text parts
|
|
175
|
+
text_parts = [item.get("text", "") for item in lc_message.content if item.get("type") == "text"]
|
|
176
|
+
input_dict = {"input": " ".join(text_parts) if text_parts else ""}
|
|
177
|
+
else:
|
|
178
|
+
input_dict = {"input": str(lc_message.content)}
|
|
179
|
+
else:
|
|
180
|
+
input_dict = {"input": str(lc_message)}
|
|
144
181
|
else:
|
|
145
|
-
|
|
146
|
-
input_text = self.input_value
|
|
182
|
+
input_dict = {"input": self.input_value}
|
|
147
183
|
|
|
148
|
-
input_dict: dict[str, str | list[BaseMessage]] = {}
|
|
149
184
|
if hasattr(self, "system_prompt"):
|
|
150
185
|
input_dict["system_prompt"] = self.system_prompt
|
|
151
|
-
if hasattr(self, "chat_history") and self.chat_history:
|
|
152
|
-
if (
|
|
153
|
-
hasattr(self.chat_history, "to_data")
|
|
154
|
-
and callable(self.chat_history.to_data)
|
|
155
|
-
and self.chat_history.__class__.__name__ == "Data"
|
|
156
|
-
):
|
|
157
|
-
input_dict["chat_history"] = data_to_messages(self.chat_history)
|
|
158
|
-
# Handle both lfx.schema.message.Message and langflow.schema.message.Message types
|
|
159
|
-
if all(hasattr(m, "to_data") and callable(m.to_data) and "text" in m.data for m in self.chat_history):
|
|
160
|
-
input_dict["chat_history"] = data_to_messages(self.chat_history)
|
|
161
|
-
if all(isinstance(m, Message) for m in self.chat_history):
|
|
162
|
-
input_dict["chat_history"] = data_to_messages([m.to_data() for m in self.chat_history])
|
|
163
|
-
if hasattr(lc_message, "content") and isinstance(lc_message.content, list):
|
|
164
|
-
# ! Because the input has to be a string, we must pass the images in the chat_history
|
|
165
186
|
|
|
187
|
+
if hasattr(self, "chat_history") and self.chat_history:
|
|
188
|
+
if isinstance(self.chat_history, Data):
|
|
189
|
+
input_dict["chat_history"] = self._data_to_messages_skip_empty([self.chat_history])
|
|
190
|
+
elif all(hasattr(m, "to_data") and callable(m.to_data) and "text" in m.data for m in self.chat_history):
|
|
191
|
+
input_dict["chat_history"] = self._data_to_messages_skip_empty(self.chat_history)
|
|
192
|
+
elif all(isinstance(m, Message) for m in self.chat_history):
|
|
193
|
+
input_dict["chat_history"] = self._data_to_messages_skip_empty([m.to_data() for m in self.chat_history])
|
|
194
|
+
|
|
195
|
+
# Handle multimodal input (images + text)
|
|
196
|
+
# Note: Agent input must be a string, so we extract text and move images to chat_history
|
|
197
|
+
if lc_message is not None and hasattr(lc_message, "content") and isinstance(lc_message.content, list):
|
|
198
|
+
# Extract images and text from the text content items
|
|
166
199
|
image_dicts = [item for item in lc_message.content if item.get("type") == "image"]
|
|
167
|
-
|
|
200
|
+
text_content = [item for item in lc_message.content if item.get("type") != "image"]
|
|
201
|
+
|
|
202
|
+
text_strings = [
|
|
203
|
+
item.get("text", "")
|
|
204
|
+
for item in text_content
|
|
205
|
+
if item.get("type") == "text" and item.get("text", "").strip()
|
|
206
|
+
]
|
|
207
|
+
|
|
208
|
+
# Set input to concatenated text or empty string
|
|
209
|
+
input_dict["input"] = " ".join(text_strings) if text_strings else ""
|
|
210
|
+
|
|
211
|
+
# If input is still a list or empty, provide a default
|
|
212
|
+
if isinstance(input_dict["input"], list) or not input_dict["input"]:
|
|
213
|
+
input_dict["input"] = "Process the provided images."
|
|
168
214
|
|
|
169
215
|
if "chat_history" not in input_dict:
|
|
170
216
|
input_dict["chat_history"] = []
|
|
217
|
+
|
|
171
218
|
if isinstance(input_dict["chat_history"], list):
|
|
172
219
|
input_dict["chat_history"].extend(HumanMessage(content=[image_dict]) for image_dict in image_dicts)
|
|
173
220
|
else:
|
|
174
221
|
input_dict["chat_history"] = [HumanMessage(content=[image_dict]) for image_dict in image_dicts]
|
|
175
|
-
|
|
222
|
+
|
|
223
|
+
# Final safety check: ensure input is never empty (prevents Anthropic API errors)
|
|
224
|
+
current_input = input_dict.get("input", "")
|
|
225
|
+
if isinstance(current_input, list):
|
|
226
|
+
current_input = " ".join(map(str, current_input))
|
|
227
|
+
elif not isinstance(current_input, str):
|
|
228
|
+
current_input = str(current_input)
|
|
229
|
+
|
|
230
|
+
if not current_input.strip():
|
|
231
|
+
input_dict["input"] = "Continue the conversation."
|
|
232
|
+
else:
|
|
233
|
+
input_dict["input"] = current_input
|
|
234
|
+
|
|
176
235
|
if hasattr(self, "graph"):
|
|
177
236
|
session_id = self.graph.session_id
|
|
178
237
|
elif hasattr(self, "_session_id"):
|
|
@@ -181,7 +240,6 @@ class LCAgentComponent(Component):
|
|
|
181
240
|
session_id = None
|
|
182
241
|
|
|
183
242
|
sender_name = get_chat_output_sender_name(self) or self.display_name or "AI"
|
|
184
|
-
|
|
185
243
|
agent_message = Message(
|
|
186
244
|
sender=MESSAGE_SENDER_AI,
|
|
187
245
|
sender_name=sender_name,
|
|
@@ -189,15 +247,24 @@ class LCAgentComponent(Component):
|
|
|
189
247
|
content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
|
|
190
248
|
session_id=session_id or uuid.uuid4(),
|
|
191
249
|
)
|
|
250
|
+
|
|
251
|
+
# Create token callback if event_manager is available
|
|
252
|
+
# This wraps the event_manager's on_token method to match OnTokenFunctionType Protocol
|
|
253
|
+
on_token_callback: OnTokenFunctionType | None = None
|
|
254
|
+
if self._event_manager:
|
|
255
|
+
on_token_callback = cast("OnTokenFunctionType", self._event_manager.on_token)
|
|
256
|
+
|
|
192
257
|
try:
|
|
193
258
|
result = await process_agent_events(
|
|
194
259
|
runnable.astream_events(
|
|
195
260
|
input_dict,
|
|
196
|
-
|
|
261
|
+
# here we use the shared callbacks because the AgentExecutor uses the tools
|
|
262
|
+
config={"callbacks": [AgentAsyncHandler(self.log), *self._get_shared_callbacks()]},
|
|
197
263
|
version="v2",
|
|
198
264
|
),
|
|
199
265
|
agent_message,
|
|
200
266
|
cast("SendMessageFunctionType", self.send_message),
|
|
267
|
+
on_token_callback,
|
|
201
268
|
)
|
|
202
269
|
except ExceptionWithMessageError as e:
|
|
203
270
|
if hasattr(e, "agent_message") and hasattr(e.agent_message, "id"):
|
|
@@ -269,15 +336,40 @@ class LCToolsAgentComponent(LCAgentComponent):
|
|
|
269
336
|
tools_names = ", ".join([tool.name for tool in self.tools])
|
|
270
337
|
return tools_names
|
|
271
338
|
|
|
339
|
+
# Set shared callbacks for tracing
|
|
340
|
+
def set_tools_callbacks(self, tools_list: list[Tool], callbacks_list: list[BaseCallbackHandler]):
|
|
341
|
+
"""Set shared callbacks for tracing to the tools.
|
|
342
|
+
|
|
343
|
+
If we do not pass down the same callbacks to each tool
|
|
344
|
+
used by the agent, then each tool will instantiate a new callback.
|
|
345
|
+
For some tracing services, this will cause
|
|
346
|
+
the callback handler to lose the id of its parent run (Agent)
|
|
347
|
+
and thus throw an error in the tracing service client.
|
|
348
|
+
|
|
349
|
+
Args:
|
|
350
|
+
tools_list: list of tools to set the callbacks for
|
|
351
|
+
callbacks_list: list of callbacks to set for the tools
|
|
352
|
+
Returns:
|
|
353
|
+
None
|
|
354
|
+
"""
|
|
355
|
+
for tool in tools_list or []:
|
|
356
|
+
if hasattr(tool, "callbacks"):
|
|
357
|
+
tool.callbacks = callbacks_list
|
|
358
|
+
|
|
272
359
|
async def _get_tools(self) -> list[Tool]:
|
|
273
360
|
component_toolkit = _get_component_toolkit()
|
|
274
361
|
tools_names = self._build_tools_names()
|
|
275
362
|
agent_description = self.get_tool_description()
|
|
276
363
|
# TODO: Agent Description Depreciated Feature to be removed
|
|
277
364
|
description = f"{agent_description}{tools_names}"
|
|
365
|
+
|
|
278
366
|
tools = component_toolkit(component=self).get_tools(
|
|
279
|
-
tool_name=self.get_tool_name(),
|
|
367
|
+
tool_name=self.get_tool_name(),
|
|
368
|
+
tool_description=description,
|
|
369
|
+
# here we do not use the shared callbacks as we are exposing the agent as a tool
|
|
370
|
+
callbacks=self.get_langchain_callbacks(),
|
|
280
371
|
)
|
|
281
372
|
if hasattr(self, "tools_metadata"):
|
|
282
373
|
tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)
|
|
374
|
+
|
|
283
375
|
return tools
|
|
@@ -0,0 +1,380 @@
|
|
|
1
|
+
"""Reusable base classes for ALTK agent components and tool wrappers.
|
|
2
|
+
|
|
3
|
+
This module abstracts common orchestration so concrete components can focus
|
|
4
|
+
on user-facing configuration and small customizations.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import uuid
|
|
10
|
+
from abc import ABC, abstractmethod
|
|
11
|
+
from typing import TYPE_CHECKING, Any, cast
|
|
12
|
+
|
|
13
|
+
from altk.core.llm import get_llm
|
|
14
|
+
from langchain.agents import AgentExecutor, BaseMultiActionAgent, BaseSingleActionAgent
|
|
15
|
+
from langchain_anthropic.chat_models import ChatAnthropic
|
|
16
|
+
from langchain_core.language_models.chat_models import BaseChatModel
|
|
17
|
+
from langchain_core.messages import BaseMessage, HumanMessage
|
|
18
|
+
from langchain_core.runnables import Runnable, RunnableBinding
|
|
19
|
+
from langchain_core.tools import BaseTool
|
|
20
|
+
from langchain_openai.chat_models.base import ChatOpenAI
|
|
21
|
+
from pydantic import Field
|
|
22
|
+
|
|
23
|
+
from lfx.base.agents.callback import AgentAsyncHandler
|
|
24
|
+
from lfx.base.agents.events import ExceptionWithMessageError, process_agent_events
|
|
25
|
+
from lfx.base.agents.utils import data_to_messages, get_chat_output_sender_name
|
|
26
|
+
from lfx.components.models_and_agents import AgentComponent
|
|
27
|
+
from lfx.log.logger import logger
|
|
28
|
+
from lfx.memory import delete_message
|
|
29
|
+
from lfx.schema.content_block import ContentBlock
|
|
30
|
+
from lfx.schema.data import Data
|
|
31
|
+
|
|
32
|
+
if TYPE_CHECKING:
|
|
33
|
+
from collections.abc import Sequence
|
|
34
|
+
|
|
35
|
+
from lfx.schema.log import SendMessageFunctionType
|
|
36
|
+
|
|
37
|
+
from lfx.schema.message import Message
|
|
38
|
+
from lfx.utils.constants import MESSAGE_SENDER_AI
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def normalize_message_content(message: BaseMessage) -> str:
|
|
42
|
+
"""Normalize message content to handle inconsistent formats from Data.to_lc_message().
|
|
43
|
+
|
|
44
|
+
Args:
|
|
45
|
+
message: A BaseMessage that may have content as either:
|
|
46
|
+
- str (for AI messages)
|
|
47
|
+
- list[dict] (for User messages in format [{"type": "text", "text": "..."}])
|
|
48
|
+
|
|
49
|
+
Returns:
|
|
50
|
+
str: The extracted text content
|
|
51
|
+
|
|
52
|
+
Note:
|
|
53
|
+
This addresses the inconsistency in lfx.schema.data.Data.to_lc_message() where:
|
|
54
|
+
- User messages: content = [{"type": "text", "text": text}] (list format)
|
|
55
|
+
- AI messages: content = text (string format)
|
|
56
|
+
"""
|
|
57
|
+
content = message.content
|
|
58
|
+
|
|
59
|
+
# Handle string format (AI messages)
|
|
60
|
+
if isinstance(content, str):
|
|
61
|
+
return content
|
|
62
|
+
|
|
63
|
+
# Handle list format (User messages)
|
|
64
|
+
if isinstance(content, list) and len(content) > 0:
|
|
65
|
+
# Extract text from first content block that has 'text' field
|
|
66
|
+
for item in content:
|
|
67
|
+
if isinstance(item, dict) and item.get("type") == "text" and "text" in item:
|
|
68
|
+
return item["text"]
|
|
69
|
+
# If no text found, return empty string (e.g., image-only messages)
|
|
70
|
+
return ""
|
|
71
|
+
|
|
72
|
+
# Handle empty list or other formats
|
|
73
|
+
if isinstance(content, list):
|
|
74
|
+
return ""
|
|
75
|
+
|
|
76
|
+
# Fallback for any other format
|
|
77
|
+
return str(content)
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
# === Base Tool Wrapper Architecture ===
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
class BaseToolWrapper(ABC):
|
|
84
|
+
"""Base class for all tool wrappers in the pipeline.
|
|
85
|
+
|
|
86
|
+
Tool wrappers can enhance tools by adding pre-execution validation,
|
|
87
|
+
post-execution processing, or other capabilities.
|
|
88
|
+
"""
|
|
89
|
+
|
|
90
|
+
@abstractmethod
|
|
91
|
+
def wrap_tool(self, tool: BaseTool, **kwargs) -> BaseTool:
|
|
92
|
+
"""Wrap a tool with enhanced functionality."""
|
|
93
|
+
|
|
94
|
+
def initialize(self, **_kwargs) -> bool: # pragma: no cover - trivial
|
|
95
|
+
"""Initialize any resources needed by the wrapper."""
|
|
96
|
+
return True
|
|
97
|
+
|
|
98
|
+
@property
|
|
99
|
+
def is_available(self) -> bool: # pragma: no cover - trivial
|
|
100
|
+
"""Check if the wrapper is available for use."""
|
|
101
|
+
return True
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
class ALTKBaseTool(BaseTool):
|
|
105
|
+
"""Base class for tools that need agent interaction and ALTK LLM access.
|
|
106
|
+
|
|
107
|
+
Provides common functionality for tool execution and ALTK LLM object creation.
|
|
108
|
+
"""
|
|
109
|
+
|
|
110
|
+
name: str = Field(...)
|
|
111
|
+
description: str = Field(...)
|
|
112
|
+
wrapped_tool: BaseTool = Field(...)
|
|
113
|
+
agent: Runnable | BaseSingleActionAgent | BaseMultiActionAgent | AgentExecutor = Field(...)
|
|
114
|
+
|
|
115
|
+
def _run(self, *args, **kwargs) -> str:
|
|
116
|
+
"""Abstract method implementation that uses the wrapped tool execution."""
|
|
117
|
+
return self._execute_tool(*args, **kwargs)
|
|
118
|
+
|
|
119
|
+
def _execute_tool(self, *args, **kwargs) -> str:
|
|
120
|
+
"""Execute the wrapped tool with compatibility across LC versions."""
|
|
121
|
+
# BaseTool.run() expects tool_input as first argument
|
|
122
|
+
if args:
|
|
123
|
+
# Use first arg as tool_input, pass remaining args
|
|
124
|
+
tool_input = args[0]
|
|
125
|
+
return self.wrapped_tool.run(tool_input, *args[1:])
|
|
126
|
+
if kwargs:
|
|
127
|
+
# Use kwargs dict as tool_input
|
|
128
|
+
return self.wrapped_tool.run(kwargs)
|
|
129
|
+
# No arguments - pass empty dict as tool_input
|
|
130
|
+
return self.wrapped_tool.run({})
|
|
131
|
+
|
|
132
|
+
def _get_altk_llm_object(self, *, use_output_val: bool = True) -> Any:
|
|
133
|
+
"""Extract the underlying LLM and map it to an ALTK client object."""
|
|
134
|
+
llm_object: BaseChatModel | None = None
|
|
135
|
+
steps = getattr(self.agent, "steps", None)
|
|
136
|
+
if steps:
|
|
137
|
+
for step in steps:
|
|
138
|
+
if isinstance(step, RunnableBinding) and isinstance(step.bound, BaseChatModel):
|
|
139
|
+
llm_object = step.bound
|
|
140
|
+
break
|
|
141
|
+
|
|
142
|
+
if isinstance(llm_object, ChatAnthropic):
|
|
143
|
+
model_name = f"anthropic/{llm_object.model}"
|
|
144
|
+
api_key = llm_object.anthropic_api_key.get_secret_value()
|
|
145
|
+
llm_client_type = "litellm.output_val" if use_output_val else "litellm"
|
|
146
|
+
llm_client = get_llm(llm_client_type)
|
|
147
|
+
llm_client_obj = llm_client(model_name=model_name, api_key=api_key)
|
|
148
|
+
elif isinstance(llm_object, ChatOpenAI):
|
|
149
|
+
model_name = llm_object.model_name
|
|
150
|
+
api_key = llm_object.openai_api_key.get_secret_value()
|
|
151
|
+
llm_client_type = "openai.sync.output_val" if use_output_val else "openai.sync"
|
|
152
|
+
llm_client = get_llm(llm_client_type)
|
|
153
|
+
llm_client_obj = llm_client(model=model_name, api_key=api_key)
|
|
154
|
+
else:
|
|
155
|
+
logger.info("ALTK currently only supports OpenAI and Anthropic models through Langflow.")
|
|
156
|
+
llm_client_obj = None
|
|
157
|
+
|
|
158
|
+
return llm_client_obj
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
class ToolPipelineManager:
    """Holds an ordered pipeline of tool wrappers and applies it to tools."""

    def __init__(self):
        # Wrappers are applied in reverse registration order, so the
        # first-added wrapper ends up as the outermost layer.
        self.wrappers: list[BaseToolWrapper] = []

    def clear(self) -> None:
        """Remove every configured wrapper."""
        self.wrappers.clear()

    def add_wrapper(self, wrapper: BaseToolWrapper) -> None:
        """Append a wrapper to the end of the pipeline."""
        self.wrappers.append(wrapper)

    def configure_wrappers(self, wrappers: list[BaseToolWrapper]) -> None:
        """Replace current wrappers with new configuration."""
        self.clear()
        for item in wrappers:
            self.add_wrapper(item)

    def process_tools(self, tools: list[BaseTool], **kwargs) -> list[BaseTool]:
        """Run each tool through the wrapper pipeline and return the results."""
        processed: list[BaseTool] = []
        for tool in tools:
            processed.append(self._apply_wrappers_to_tool(tool, **kwargs))
        return processed

    def _apply_wrappers_to_tool(self, tool: BaseTool, **kwargs) -> BaseTool:
        """Wrap a single tool with every available wrapper, skipping unavailable ones."""
        current = tool
        for wrapper in reversed(self.wrappers):
            if wrapper.is_available:
                current = wrapper.wrap_tool(current, **kwargs)
        return current
# === Base Agent Component Orchestration ===
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
class ALTKBaseAgentComponent(AgentComponent):
|
|
194
|
+
"""Base agent component that centralizes orchestration and hooks.
|
|
195
|
+
|
|
196
|
+
Subclasses should override `get_tool_wrappers` to provide their wrappers
|
|
197
|
+
and can customize context building if needed.
|
|
198
|
+
"""
|
|
199
|
+
|
|
200
|
+
def __init__(self, **kwargs):
    """Initialize the component and attach an empty tool pipeline manager."""
    super().__init__(**kwargs)
    # Subclasses populate this via configure_tool_pipeline().
    self.pipeline_manager = ToolPipelineManager()
# ---- Hooks for subclasses ----
|
|
205
|
+
def configure_tool_pipeline(self) -> None:
    """Configure the tool pipeline with wrappers. Subclasses override this."""
    # The base implementation installs no wrappers at all.
    self.pipeline_manager.clear()
def build_conversation_context(self) -> list[BaseMessage]:
    """Build the LC message list: prior chat history first, then the current input.

    Returns:
        Messages in chronological order (history, then the live input).

    Raises:
        ValueError: if chat_history is neither a Data object, a list of
            Data/Message objects, nor empty/None.
    """
    context: list[BaseMessage] = []

    history = getattr(self, "chat_history", None)
    if history:
        if isinstance(history, Data):
            context.append(history.to_lc_message())
        elif isinstance(history, list):
            if all(isinstance(entry, Message) for entry in history):
                context.extend(entry.to_lc_message() for entry in history)
            else:
                # Assume a list of Data objects; data_to_messages validates contents.
                try:
                    context.extend(data_to_messages(history))
                except (AttributeError, TypeError) as e:
                    error_message = f"Invalid chat_history list contents: {e}"
                    raise ValueError(error_message) from e
        else:
            # Reject every other type (strings, numbers, etc.).
            type_name = type(history).__name__
            error_message = (
                f"chat_history must be a Data object, list of Data/Message objects, or None. Got: {type_name}"
            )
            raise ValueError(error_message)

    # The current input goes last to preserve chronological order.
    current = getattr(self, "input_value", None)
    if current:
        if isinstance(current, Message):
            context.append(current.to_lc_message())
        else:
            context.append(HumanMessage(content=str(current)))

    return context
def get_user_query(self) -> str:
    """Return the current user input as plain text.

    Prefers the input's own `get_text()` accessor when it exists and is
    callable; otherwise falls back to `str()`.
    """
    getter = getattr(self.input_value, "get_text", None)
    if callable(getter):
        return getter()
    return str(self.input_value)
|
|
250
|
+
# ---- Internal helpers reused by run/update ----
|
|
251
|
+
def _initialize_tool_pipeline(self) -> None:
|
|
252
|
+
"""Initialize the tool pipeline by calling the subclass configuration."""
|
|
253
|
+
self.configure_tool_pipeline()
|
|
254
|
+
|
|
255
|
+
def update_runnable_instance(
    self, agent: AgentExecutor, runnable: AgentExecutor, tools: Sequence[BaseTool]
) -> AgentExecutor:
    """Update the runnable instance with processed tools.

    The default implementation sends every tool through the wrapper pipeline,
    handing the agent, the user query, and the conversation context to the
    wrappers so they can use them. Subclasses may override to customize
    tool processing.
    """
    query = self.get_user_query()
    history = self.build_conversation_context()

    # Let the subclass (re)configure the pipeline before processing.
    self._initialize_tool_pipeline()
    wrapped_tools = self.pipeline_manager.process_tools(
        list(tools or []),
        agent=agent,
        user_query=query,
        conversation_context=history,
    )

    runnable.tools = wrapped_tools
    return runnable
async def run_agent(
    self,
    agent: Runnable | BaseSingleActionAgent | BaseMultiActionAgent | AgentExecutor,
) -> Message:
    """Execute the agent, streaming its events into a partial chat Message.

    Wraps a non-executor agent in an AgentExecutor, applies the tool wrapper
    pipeline via update_runnable_instance, builds the input dict (system
    prompt, chat history, input text, and any image parts), then streams
    v2 events through process_agent_events. On failure the partial message
    is removed before re-raising.
    """
    if isinstance(agent, AgentExecutor):
        runnable = agent
    else:
        # note the tools are not required to run the agent, hence the validation removed.
        # NOTE(review): `hasattr(...) and self.attr` yields False when the
        # attribute is missing, so e.g. max_iterations may be passed as False.
        handle_parsing_errors = hasattr(self, "handle_parsing_errors") and self.handle_parsing_errors
        verbose = hasattr(self, "verbose") and self.verbose
        max_iterations = hasattr(self, "max_iterations") and self.max_iterations
        runnable = AgentExecutor.from_agent_and_tools(
            agent=agent,
            tools=self.tools or [],
            handle_parsing_errors=handle_parsing_errors,
            verbose=verbose,
            max_iterations=max_iterations,
        )
    # Apply the tool wrapper pipeline (see update_runnable_instance).
    runnable = self.update_runnable_instance(agent, runnable, self.tools)

    # Convert input_value to proper format for agent
    if hasattr(self.input_value, "to_lc_message") and callable(self.input_value.to_lc_message):
        lc_message = self.input_value.to_lc_message()
        input_text = lc_message.content if hasattr(lc_message, "content") else str(lc_message)
    else:
        lc_message = None
        input_text = self.input_value

    input_dict: dict[str, str | list[BaseMessage]] = {}
    if hasattr(self, "system_prompt"):
        input_dict["system_prompt"] = self.system_prompt
    if hasattr(self, "chat_history") and self.chat_history:
        # Duck-typed Data check (by class name) to cover both lfx and
        # langflow Data classes.
        if (
            hasattr(self.chat_history, "to_data")
            and callable(self.chat_history.to_data)
            and self.chat_history.__class__.__name__ == "Data"
        ):
            input_dict["chat_history"] = data_to_messages(self.chat_history)
        # Handle both lfx.schema.message.Message and langflow.schema.message.Message types
        # NOTE(review): the following checks iterate chat_history and may
        # overwrite the entry set above; presumably chat_history is a list
        # here — confirm for the single-Data case.
        if all(hasattr(m, "to_data") and callable(m.to_data) and "text" in m.data for m in self.chat_history):
            input_dict["chat_history"] = data_to_messages(self.chat_history)
        if all(isinstance(m, Message) for m in self.chat_history):
            input_dict["chat_history"] = data_to_messages([m.to_data() for m in self.chat_history])
    if hasattr(lc_message, "content") and isinstance(lc_message.content, list):
        # ! Because the input has to be a string, we must pass the images in the chat_history

        # Split image parts out of the multimodal content; text stays in `input`.
        image_dicts = [item for item in lc_message.content if item.get("type") == "image"]
        lc_message.content = [item for item in lc_message.content if item.get("type") != "image"]

        if "chat_history" not in input_dict:
            input_dict["chat_history"] = []
        if isinstance(input_dict["chat_history"], list):
            input_dict["chat_history"].extend(HumanMessage(content=[image_dict]) for image_dict in image_dicts)
        else:
            input_dict["chat_history"] = [HumanMessage(content=[image_dict]) for image_dict in image_dicts]
    input_dict["input"] = input_text
    # Session id resolution: graph session takes priority over the component's own.
    if hasattr(self, "graph"):
        session_id = self.graph.session_id
    elif hasattr(self, "_session_id"):
        session_id = self._session_id
    else:
        session_id = None

    try:
        sender_name = get_chat_output_sender_name(self)
    except AttributeError:
        # Fallback when no chat output is wired up.
        sender_name = self.display_name or "AI"

    # Placeholder message; state "partial" until streaming completes.
    agent_message = Message(
        sender=MESSAGE_SENDER_AI,
        sender_name=sender_name,
        properties={"icon": "Bot", "state": "partial"},
        content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
        session_id=session_id or uuid.uuid4(),
    )
    try:
        result = await process_agent_events(
            runnable.astream_events(
                input_dict,
                config={
                    "callbacks": [
                        AgentAsyncHandler(self.log),
                        *self.get_langchain_callbacks(),
                    ]
                },
                version="v2",
            ),
            agent_message,
            cast("SendMessageFunctionType", self.send_message),
        )
    except ExceptionWithMessageError as e:
        # Remove the partial message that was already persisted/sent.
        if hasattr(e, "agent_message") and hasattr(e.agent_message, "id"):
            msg_id = e.agent_message.id
            await delete_message(id_=msg_id)
            await self._send_message_event(e.agent_message, category="remove_message")
        logger.error(f"ExceptionWithMessageError: {e}")
        raise
    except Exception as e:
        # Log or handle any other exceptions
        logger.error(f"Error: {e}")
        raise

    self.status = result
    return result
|