lfx-nightly 0.2.0.dev0__py3-none-any.whl → 0.2.0.dev26__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.
Files changed (188)
  1. lfx/_assets/component_index.json +1 -1
  2. lfx/base/agents/agent.py +13 -1
  3. lfx/base/agents/altk_base_agent.py +380 -0
  4. lfx/base/agents/altk_tool_wrappers.py +565 -0
  5. lfx/base/agents/events.py +2 -1
  6. lfx/base/composio/composio_base.py +159 -224
  7. lfx/base/data/base_file.py +88 -21
  8. lfx/base/data/storage_utils.py +192 -0
  9. lfx/base/data/utils.py +178 -14
  10. lfx/base/embeddings/embeddings_class.py +113 -0
  11. lfx/base/models/groq_constants.py +74 -58
  12. lfx/base/models/groq_model_discovery.py +265 -0
  13. lfx/base/models/model.py +1 -1
  14. lfx/base/models/model_utils.py +100 -0
  15. lfx/base/models/openai_constants.py +7 -0
  16. lfx/base/models/watsonx_constants.py +32 -8
  17. lfx/base/tools/run_flow.py +601 -129
  18. lfx/cli/commands.py +6 -3
  19. lfx/cli/common.py +2 -2
  20. lfx/cli/run.py +1 -1
  21. lfx/cli/script_loader.py +53 -11
  22. lfx/components/Notion/create_page.py +1 -1
  23. lfx/components/Notion/list_database_properties.py +1 -1
  24. lfx/components/Notion/list_pages.py +1 -1
  25. lfx/components/Notion/list_users.py +1 -1
  26. lfx/components/Notion/page_content_viewer.py +1 -1
  27. lfx/components/Notion/search.py +1 -1
  28. lfx/components/Notion/update_page_property.py +1 -1
  29. lfx/components/__init__.py +19 -5
  30. lfx/components/{agents → altk}/__init__.py +5 -9
  31. lfx/components/altk/altk_agent.py +193 -0
  32. lfx/components/apify/apify_actor.py +1 -1
  33. lfx/components/composio/__init__.py +70 -18
  34. lfx/components/composio/apollo_composio.py +11 -0
  35. lfx/components/composio/bitbucket_composio.py +11 -0
  36. lfx/components/composio/canva_composio.py +11 -0
  37. lfx/components/composio/coda_composio.py +11 -0
  38. lfx/components/composio/composio_api.py +10 -0
  39. lfx/components/composio/discord_composio.py +1 -1
  40. lfx/components/composio/elevenlabs_composio.py +11 -0
  41. lfx/components/composio/exa_composio.py +11 -0
  42. lfx/components/composio/firecrawl_composio.py +11 -0
  43. lfx/components/composio/fireflies_composio.py +11 -0
  44. lfx/components/composio/gmail_composio.py +1 -1
  45. lfx/components/composio/googlebigquery_composio.py +11 -0
  46. lfx/components/composio/googlecalendar_composio.py +1 -1
  47. lfx/components/composio/googledocs_composio.py +1 -1
  48. lfx/components/composio/googlemeet_composio.py +1 -1
  49. lfx/components/composio/googlesheets_composio.py +1 -1
  50. lfx/components/composio/googletasks_composio.py +1 -1
  51. lfx/components/composio/heygen_composio.py +11 -0
  52. lfx/components/composio/mem0_composio.py +11 -0
  53. lfx/components/composio/peopledatalabs_composio.py +11 -0
  54. lfx/components/composio/perplexityai_composio.py +11 -0
  55. lfx/components/composio/serpapi_composio.py +11 -0
  56. lfx/components/composio/slack_composio.py +3 -574
  57. lfx/components/composio/slackbot_composio.py +1 -1
  58. lfx/components/composio/snowflake_composio.py +11 -0
  59. lfx/components/composio/tavily_composio.py +11 -0
  60. lfx/components/composio/youtube_composio.py +2 -2
  61. lfx/components/cuga/__init__.py +34 -0
  62. lfx/components/cuga/cuga_agent.py +730 -0
  63. lfx/components/data/__init__.py +78 -28
  64. lfx/components/data_source/__init__.py +58 -0
  65. lfx/components/{data → data_source}/api_request.py +26 -3
  66. lfx/components/{data → data_source}/csv_to_data.py +15 -10
  67. lfx/components/{data → data_source}/json_to_data.py +15 -8
  68. lfx/components/{data → data_source}/news_search.py +1 -1
  69. lfx/components/{data → data_source}/rss.py +1 -1
  70. lfx/components/{data → data_source}/sql_executor.py +1 -1
  71. lfx/components/{data → data_source}/url.py +1 -1
  72. lfx/components/{data → data_source}/web_search.py +1 -1
  73. lfx/components/datastax/astradb_cql.py +1 -1
  74. lfx/components/datastax/astradb_graph.py +1 -1
  75. lfx/components/datastax/astradb_tool.py +1 -1
  76. lfx/components/datastax/astradb_vectorstore.py +1 -1
  77. lfx/components/datastax/hcd.py +1 -1
  78. lfx/components/deactivated/json_document_builder.py +1 -1
  79. lfx/components/docling/__init__.py +0 -3
  80. lfx/components/elastic/elasticsearch.py +1 -1
  81. lfx/components/elastic/opensearch_multimodal.py +1575 -0
  82. lfx/components/files_and_knowledge/__init__.py +47 -0
  83. lfx/components/{data → files_and_knowledge}/directory.py +1 -1
  84. lfx/components/{data → files_and_knowledge}/file.py +246 -18
  85. lfx/components/{knowledge_bases → files_and_knowledge}/retrieval.py +2 -2
  86. lfx/components/{data → files_and_knowledge}/save_file.py +142 -22
  87. lfx/components/flow_controls/__init__.py +58 -0
  88. lfx/components/{logic → flow_controls}/conditional_router.py +1 -1
  89. lfx/components/{logic → flow_controls}/loop.py +43 -9
  90. lfx/components/flow_controls/run_flow.py +108 -0
  91. lfx/components/glean/glean_search_api.py +1 -1
  92. lfx/components/groq/groq.py +35 -28
  93. lfx/components/helpers/__init__.py +102 -0
  94. lfx/components/input_output/__init__.py +3 -1
  95. lfx/components/input_output/chat.py +4 -3
  96. lfx/components/input_output/chat_output.py +4 -4
  97. lfx/components/input_output/text.py +1 -1
  98. lfx/components/input_output/text_output.py +1 -1
  99. lfx/components/{data → input_output}/webhook.py +1 -1
  100. lfx/components/knowledge_bases/__init__.py +59 -4
  101. lfx/components/langchain_utilities/character.py +1 -1
  102. lfx/components/langchain_utilities/csv_agent.py +84 -16
  103. lfx/components/langchain_utilities/json_agent.py +67 -12
  104. lfx/components/langchain_utilities/language_recursive.py +1 -1
  105. lfx/components/llm_operations/__init__.py +46 -0
  106. lfx/components/{processing → llm_operations}/batch_run.py +1 -1
  107. lfx/components/{processing → llm_operations}/lambda_filter.py +1 -1
  108. lfx/components/{logic → llm_operations}/llm_conditional_router.py +1 -1
  109. lfx/components/{processing/llm_router.py → llm_operations/llm_selector.py} +3 -3
  110. lfx/components/{processing → llm_operations}/structured_output.py +1 -1
  111. lfx/components/logic/__init__.py +126 -0
  112. lfx/components/mem0/mem0_chat_memory.py +11 -0
  113. lfx/components/models/__init__.py +64 -9
  114. lfx/components/models_and_agents/__init__.py +49 -0
  115. lfx/components/{agents → models_and_agents}/agent.py +2 -2
  116. lfx/components/models_and_agents/embedding_model.py +423 -0
  117. lfx/components/models_and_agents/language_model.py +398 -0
  118. lfx/components/{agents → models_and_agents}/mcp_component.py +53 -44
  119. lfx/components/{helpers → models_and_agents}/memory.py +1 -1
  120. lfx/components/nvidia/system_assist.py +1 -1
  121. lfx/components/olivya/olivya.py +1 -1
  122. lfx/components/ollama/ollama.py +17 -3
  123. lfx/components/processing/__init__.py +9 -57
  124. lfx/components/processing/converter.py +1 -1
  125. lfx/components/processing/dataframe_operations.py +1 -1
  126. lfx/components/processing/parse_json_data.py +2 -2
  127. lfx/components/processing/parser.py +1 -1
  128. lfx/components/processing/split_text.py +1 -1
  129. lfx/components/qdrant/qdrant.py +1 -1
  130. lfx/components/redis/redis.py +1 -1
  131. lfx/components/twelvelabs/split_video.py +10 -0
  132. lfx/components/twelvelabs/video_file.py +12 -0
  133. lfx/components/utilities/__init__.py +43 -0
  134. lfx/components/{helpers → utilities}/calculator_core.py +1 -1
  135. lfx/components/{helpers → utilities}/current_date.py +1 -1
  136. lfx/components/{processing → utilities}/python_repl_core.py +1 -1
  137. lfx/components/vectorstores/local_db.py +9 -0
  138. lfx/components/youtube/youtube_transcripts.py +118 -30
  139. lfx/custom/custom_component/component.py +57 -1
  140. lfx/custom/custom_component/custom_component.py +68 -6
  141. lfx/graph/edge/base.py +43 -20
  142. lfx/graph/graph/base.py +4 -1
  143. lfx/graph/state/model.py +15 -2
  144. lfx/graph/utils.py +6 -0
  145. lfx/graph/vertex/base.py +4 -1
  146. lfx/graph/vertex/param_handler.py +10 -7
  147. lfx/helpers/__init__.py +12 -0
  148. lfx/helpers/flow.py +117 -0
  149. lfx/inputs/input_mixin.py +24 -1
  150. lfx/inputs/inputs.py +13 -1
  151. lfx/interface/components.py +161 -83
  152. lfx/log/logger.py +5 -3
  153. lfx/services/database/__init__.py +5 -0
  154. lfx/services/database/service.py +25 -0
  155. lfx/services/deps.py +87 -22
  156. lfx/services/manager.py +19 -6
  157. lfx/services/mcp_composer/service.py +998 -157
  158. lfx/services/session.py +5 -0
  159. lfx/services/settings/base.py +51 -7
  160. lfx/services/settings/constants.py +8 -0
  161. lfx/services/storage/local.py +76 -46
  162. lfx/services/storage/service.py +152 -29
  163. lfx/template/field/base.py +3 -0
  164. lfx/utils/ssrf_protection.py +384 -0
  165. lfx/utils/validate_cloud.py +26 -0
  166. {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/METADATA +38 -22
  167. {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/RECORD +182 -150
  168. {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/WHEEL +1 -1
  169. lfx/components/agents/altk_agent.py +0 -366
  170. lfx/components/agents/cuga_agent.py +0 -1013
  171. lfx/components/docling/docling_remote_vlm.py +0 -284
  172. lfx/components/logic/run_flow.py +0 -71
  173. lfx/components/models/embedding_model.py +0 -195
  174. lfx/components/models/language_model.py +0 -144
  175. /lfx/components/{data → data_source}/mock_data.py +0 -0
  176. /lfx/components/{knowledge_bases → files_and_knowledge}/ingestion.py +0 -0
  177. /lfx/components/{logic → flow_controls}/data_conditional_router.py +0 -0
  178. /lfx/components/{logic → flow_controls}/flow_tool.py +0 -0
  179. /lfx/components/{logic → flow_controls}/listen.py +0 -0
  180. /lfx/components/{logic → flow_controls}/notify.py +0 -0
  181. /lfx/components/{logic → flow_controls}/pass_message.py +0 -0
  182. /lfx/components/{logic → flow_controls}/sub_flow.py +0 -0
  183. /lfx/components/{processing → models_and_agents}/prompt.py +0 -0
  184. /lfx/components/{helpers → processing}/create_list.py +0 -0
  185. /lfx/components/{helpers → processing}/output_parser.py +0 -0
  186. /lfx/components/{helpers → processing}/store_message.py +0 -0
  187. /lfx/components/{helpers → utilities}/id_generator.py +0 -0
  188. {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/entry_points.txt +0 -0
{lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/WHEEL
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: hatchling 1.27.0
+ Generator: hatchling 1.28.0
  Root-Is-Purelib: true
  Tag: py3-none-any
lfx/components/agents/altk_agent.py
@@ -1,366 +0,0 @@
- import ast
- import json
- import uuid
- from collections.abc import Sequence
- from typing import TYPE_CHECKING, Any, cast
-
- from altk.core.llm import get_llm
- from altk.core.toolkit import AgentPhase
- from altk.post_tool.code_generation.code_generation import (
-     CodeGenerationComponent,
-     CodeGenerationComponentConfig,
- )
- from altk.post_tool.core.toolkit import CodeGenerationRunInput
- from langchain.agents import AgentExecutor, BaseMultiActionAgent, BaseSingleActionAgent
- from langchain_anthropic.chat_models import ChatAnthropic
- from langchain_core.language_models.chat_models import BaseChatModel
- from langchain_core.messages import BaseMessage, HumanMessage
- from langchain_core.runnables import Runnable, RunnableBinding
- from langchain_core.tools import BaseTool
- from langchain_openai.chat_models.base import ChatOpenAI
- from pydantic import Field
-
- from lfx.base.agents.callback import AgentAsyncHandler
- from lfx.base.agents.events import ExceptionWithMessageError, process_agent_events
- from lfx.base.agents.utils import data_to_messages, get_chat_output_sender_name
- from lfx.base.models.model_input_constants import (
-     MODEL_PROVIDERS_DICT,
-     MODELS_METADATA,
- )
- from lfx.components.agents import AgentComponent
- from lfx.inputs.inputs import BoolInput
- from lfx.io import DropdownInput, IntInput, Output
- from lfx.log.logger import logger
- from lfx.memory import delete_message
- from lfx.schema.content_block import ContentBlock
- from lfx.schema.data import Data
- from lfx.schema.message import Message
- from lfx.utils.constants import MESSAGE_SENDER_AI
-
- if TYPE_CHECKING:
-     from lfx.schema.log import SendMessageFunctionType
-
-
- def set_advanced_true(component_input):
-     component_input.advanced = True
-     return component_input
-
-
- MODEL_PROVIDERS_LIST = ["Anthropic", "OpenAI"]
- INPUT_NAMES_TO_BE_OVERRIDDEN = ["agent_llm"]
-
-
- def get_parent_agent_inputs():
-     return [
-         input_field for input_field in AgentComponent.inputs if input_field.name not in INPUT_NAMES_TO_BE_OVERRIDDEN
-     ]
-
-
- class PostToolProcessor(BaseTool):
-     """A tool output processor to process tool outputs.
-
-     This wrapper intercepts the tool execution output and
-     if the tool output is a JSON, it invokes an ALTK component
-     to extract information from the JSON by generating Python code.
-     """
-
-     name: str = Field(...)
-     description: str = Field(...)
-     wrapped_tool: BaseTool = Field(...)
-     user_query: str = Field(...)
-     agent: Runnable | BaseSingleActionAgent | BaseMultiActionAgent | AgentExecutor = Field(...)
-     response_processing_size_threshold: int = Field(...)
-
-     def __init__(
-         self, wrapped_tool: BaseTool, user_query: str, agent, response_processing_size_threshold: int, **kwargs
-     ):
-         super().__init__(
-             name=wrapped_tool.name,
-             description=wrapped_tool.description,
-             wrapped_tool=wrapped_tool,
-             user_query=user_query,
-             agent=agent,
-             response_processing_size_threshold=response_processing_size_threshold,
-             **kwargs,
-         )
-
-     def _execute_tool(self, *args, **kwargs) -> str:
-         """Execute the wrapped tool with proper error handling."""
-         try:
-             # Try with config parameter first (newer LangChain versions)
-             if hasattr(self.wrapped_tool, "_run"):
-                 # Ensure config is provided for StructuredTool
-                 if "config" not in kwargs:
-                     kwargs["config"] = {}
-                 return self.wrapped_tool._run(*args, **kwargs)  # noqa: SLF001
-             return self.wrapped_tool.run(*args, **kwargs)
-         except TypeError as e:
-             if "config" in str(e):
-                 # Fallback: try without config for older tools
-                 kwargs.pop("config", None)
-                 if hasattr(self.wrapped_tool, "_run"):
-                     return self.wrapped_tool._run(*args, **kwargs)  # noqa: SLF001
-                 return self.wrapped_tool.run(*args, **kwargs)
-             raise
-
-     def _run(self, *args: Any, **kwargs: Any) -> str:
-         # Run the wrapped tool
-         result = self._execute_tool(*args, **kwargs)
-
-         # Run postprocessing and return the output
-         return self.process_tool_response(result)
-
-     def _get_tool_response_str(self, tool_response) -> str | None:
-         if isinstance(tool_response, str):
-             tool_response_str = tool_response
-         elif isinstance(tool_response, Data):
-             tool_response_str = str(tool_response.data)
-         elif isinstance(tool_response, list) and all(isinstance(item, Data) for item in tool_response):
-             # get only the first element, not 100% sure if it should be the first or the last
-             tool_response_str = str(tool_response[0].data)
-         elif isinstance(tool_response, (dict, list)):
-             tool_response_str = str(tool_response)
-         else:
-             tool_response_str = None
-         return tool_response_str
-
-     def _get_altk_llm_object(self) -> Any:
-         # Extract the LLM model and map it to altk model inputs
-         llm_object: BaseChatModel | None = None
-         steps = getattr(self.agent, "steps", None)
-         if steps:
-             for step in steps:
-                 if isinstance(step, RunnableBinding) and isinstance(step.bound, BaseChatModel):
-                     llm_object = step.bound
-                     break
-         if isinstance(llm_object, ChatAnthropic):
-             # litellm needs the prefix to the model name for anthropic
-             model_name = f"anthropic/{llm_object.model}"
-             api_key = llm_object.anthropic_api_key.get_secret_value()
-             llm_client = get_llm("litellm")
-             llm_client_obj = llm_client(model_name=model_name, api_key=api_key)
-         elif isinstance(llm_object, ChatOpenAI):
-             model_name = llm_object.model_name
-             api_key = llm_object.openai_api_key.get_secret_value()
-             llm_client = get_llm("openai.sync")
-             llm_client_obj = llm_client(model=model_name, api_key=api_key)
-         else:
-             logger.info("ALTK currently only supports OpenAI and Anthropic models through Langflow.")
-             llm_client_obj = None
-
-         return llm_client_obj
-
-     def process_tool_response(self, tool_response: str, **_kwargs):
-         logger.info("Calling process_tool_response of PostToolProcessor")
-         tool_response_str = self._get_tool_response_str(tool_response)
-
-         try:
-             tool_response_json = ast.literal_eval(tool_response_str)
-             if not isinstance(tool_response_json, (list, dict)):
-                 tool_response_json = None
-         except (json.JSONDecodeError, TypeError) as e:
-             logger.info(
-                 f"An error in converting the tool response to json, this will skip the code generation component: {e}"
-             )
-             tool_response_json = None
-
-         if tool_response_json is not None and len(str(tool_response_json)) > self.response_processing_size_threshold:
-             llm_client_obj = self._get_altk_llm_object()
-             if llm_client_obj is not None:
-                 config = CodeGenerationComponentConfig(llm_client=llm_client_obj, use_docker_sandbox=False)
-
-                 middleware = CodeGenerationComponent(config=config)
-                 input_data = CodeGenerationRunInput(
-                     messages=[], nl_query=self.user_query, tool_response=tool_response_json
-                 )
-                 output = None
-                 try:
-                     output = middleware.process(input_data, AgentPhase.RUNTIME)
-                 except Exception as e:  # noqa: BLE001
-                     logger.error(f"Exception in executing CodeGenerationComponent: {e}")
-                 logger.info(f"Output of CodeGenerationComponent: {output.result}")
-                 return output.result
-         return tool_response
-
-
- class ALTKAgentComponent(AgentComponent):
-     """An advanced tool calling agent.
-
-     The ALTKAgent is an advanced AI agent that enhances the tool calling capabilities of LLMs
-     by performing special checks and processing around tool calls.
-     It uses components from the Agent Lifecycle ToolKit (https://github.com/AgentToolkit/agent-lifecycle-toolkit)
-     """
-
-     display_name: str = "ALTK Agent"
-     description: str = "Agent with enhanced tool calling capabilities. For more information on ALTK, visit https://github.com/AgentToolkit/agent-lifecycle-toolkit"
-     documentation: str = "https://docs.langflow.org/agents"
-     icon = "bot"
-     beta = True
-     name = "ALTKAgent"
-
-     # Filter out json_mode from OpenAI inputs since we handle structured output differently
-     if "OpenAI" in MODEL_PROVIDERS_DICT:
-         openai_inputs_filtered = [
-             input_field
-             for input_field in MODEL_PROVIDERS_DICT["OpenAI"]["inputs"]
-             if not (hasattr(input_field, "name") and input_field.name == "json_mode")
-         ]
-     else:
-         openai_inputs_filtered = []
-
-     inputs = [
-         DropdownInput(
-             name="agent_llm",
-             display_name="Model Provider",
-             info="The provider of the language model that the agent will use to generate responses.",
-             options=[*MODEL_PROVIDERS_LIST],
-             value="OpenAI",
-             real_time_refresh=True,
-             refresh_button=False,
-             input_types=[],
-             options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST if key in MODELS_METADATA],
-         ),
-         *get_parent_agent_inputs(),
-         BoolInput(
-             name="enable_post_tool_reflection",
-             display_name="Post Tool JSON Processing",
-             info="If true, it passes the tool output to a json processing (if json) step.",
-             value=True,
-         ),
-         # Post Tool Processing is applied only when the number of characters in the response
-         # exceed the following threshold
-         IntInput(
-             name="response_processing_size_threshold",
-             display_name="Response Processing Size Threshold",
-             value=100,
-             info="Tool output is post-processed only if the response length exceeds a specified character threshold.",
-             advanced=True,
-             show=True,
-         ),
-     ]
-     outputs = [
-         Output(name="response", display_name="Response", method="message_response"),
-     ]
-
-     def update_runnable_instance(
-         self, agent: AgentExecutor, runnable: AgentExecutor, tools: Sequence[BaseTool]
-     ) -> AgentExecutor:
-         user_query = self.input_value.get_text() if hasattr(self.input_value, "get_text") else self.input_value
-         if self.enable_post_tool_reflection:
-             wrapped_tools = [
-                 PostToolProcessor(
-                     wrapped_tool=tool,
-                     user_query=user_query,
-                     agent=agent,
-                     response_processing_size_threshold=self.response_processing_size_threshold,
-                 )
-                 if not isinstance(tool, PostToolProcessor)
-                 else tool
-                 for tool in tools
-             ]
-         else:
-             wrapped_tools = tools
-
-         runnable.tools = wrapped_tools
-
-         return runnable
-
-     async def run_agent(
-         self,
-         agent: Runnable | BaseSingleActionAgent | BaseMultiActionAgent | AgentExecutor,
-     ) -> Message:
-         if isinstance(agent, AgentExecutor):
-             runnable = agent
-         else:
-             # note the tools are not required to run the agent, hence the validation removed.
-             handle_parsing_errors = hasattr(self, "handle_parsing_errors") and self.handle_parsing_errors
-             verbose = hasattr(self, "verbose") and self.verbose
-             max_iterations = hasattr(self, "max_iterations") and self.max_iterations
-             runnable = AgentExecutor.from_agent_and_tools(
-                 agent=agent,
-                 tools=self.tools or [],
-                 handle_parsing_errors=handle_parsing_errors,
-                 verbose=verbose,
-                 max_iterations=max_iterations,
-             )
-         runnable = self.update_runnable_instance(agent, runnable, self.tools)
-
-         # Convert input_value to proper format for agent
-         if hasattr(self.input_value, "to_lc_message") and callable(self.input_value.to_lc_message):
-             lc_message = self.input_value.to_lc_message()
-             input_text = lc_message.content if hasattr(lc_message, "content") else str(lc_message)
-         else:
-             lc_message = None
-             input_text = self.input_value
-
-         input_dict: dict[str, str | list[BaseMessage]] = {}
-         if hasattr(self, "system_prompt"):
-             input_dict["system_prompt"] = self.system_prompt
-         if hasattr(self, "chat_history") and self.chat_history:
-             if (
-                 hasattr(self.chat_history, "to_data")
-                 and callable(self.chat_history.to_data)
-                 and self.chat_history.__class__.__name__ == "Data"
-             ):
-                 input_dict["chat_history"] = data_to_messages(self.chat_history)
-             # Handle both lfx.schema.message.Message and langflow.schema.message.Message types
-             if all(hasattr(m, "to_data") and callable(m.to_data) and "text" in m.data for m in self.chat_history):
-                 input_dict["chat_history"] = data_to_messages(self.chat_history)
-             if all(isinstance(m, Message) for m in self.chat_history):
-                 input_dict["chat_history"] = data_to_messages([m.to_data() for m in self.chat_history])
-         if hasattr(lc_message, "content") and isinstance(lc_message.content, list):
-             # ! Because the input has to be a string, we must pass the images in the chat_history
-
-             image_dicts = [item for item in lc_message.content if item.get("type") == "image"]
-             lc_message.content = [item for item in lc_message.content if item.get("type") != "image"]
-
-             if "chat_history" not in input_dict:
-                 input_dict["chat_history"] = []
-             if isinstance(input_dict["chat_history"], list):
-                 input_dict["chat_history"].extend(HumanMessage(content=[image_dict]) for image_dict in image_dicts)
-             else:
-                 input_dict["chat_history"] = [HumanMessage(content=[image_dict]) for image_dict in image_dicts]
-         input_dict["input"] = input_text
-         if hasattr(self, "graph"):
-             session_id = self.graph.session_id
-         elif hasattr(self, "_session_id"):
-             session_id = self._session_id
-         else:
-             session_id = None
-
-         try:
-             sender_name = get_chat_output_sender_name(self)
-         except AttributeError:
-             sender_name = self.display_name or "AI"
-
-         agent_message = Message(
-             sender=MESSAGE_SENDER_AI,
-             sender_name=sender_name,
-             properties={"icon": "Bot", "state": "partial"},
-             content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
-             session_id=session_id or uuid.uuid4(),
-         )
-         try:
-             result = await process_agent_events(
-                 runnable.astream_events(
-                     input_dict,
-                     config={"callbacks": [AgentAsyncHandler(self.log), *self.get_langchain_callbacks()]},
-                     version="v2",
-                 ),
-                 agent_message,
-                 cast("SendMessageFunctionType", self.send_message),
-             )
-         except ExceptionWithMessageError as e:
-             if hasattr(e, "agent_message") and hasattr(e.agent_message, "id"):
-                 msg_id = e.agent_message.id
-                 await delete_message(id_=msg_id)
-                 await self._send_message_event(e.agent_message, category="remove_message")
-             logger.error(f"ExceptionWithMessageError: {e}")
-             raise
-         except Exception as e:
-             # Log or handle any other exceptions
-             logger.error(f"Error: {e}")
-             raise
-
-         self.status = result
-         return result
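
The file removed above is the old ALTK agent component; the file list shows new ALTK modules appearing elsewhere in this release (lfx/base/agents/altk_base_agent.py, lfx/base/agents/altk_tool_wrappers.py, lfx/components/altk/altk_agent.py). The core pattern in the deleted PostToolProcessor is wrapping each LangChain tool so that large dict/list outputs get an extra post-processing pass before they reach the agent. Below is a minimal sketch of that wrapping pattern, assuming only langchain-core and pydantic; the postprocess callable and the names PostProcessedTool and wrap_tools are illustrative stand-ins for the ALTK code-generation step, not the package's actual API.

# Minimal sketch of the tool-wrapping pattern shown in the deleted PostToolProcessor above.
# Assumption: `postprocess` is a hypothetical hook standing in for ALTK's CodeGenerationComponent.
import ast
from collections.abc import Callable
from typing import Any

from langchain_core.tools import BaseTool
from pydantic import Field


class PostProcessedTool(BaseTool):
    """Runs a wrapped tool, then post-processes large dict/list outputs."""

    name: str = Field(...)
    description: str = Field(...)
    wrapped_tool: BaseTool = Field(...)
    postprocess: Callable[[Any], Any] = Field(...)
    size_threshold: int = 100  # only post-process payloads longer than this many characters

    def _run(self, *args: Any, **kwargs: Any) -> Any:
        # Structured tools arrive as kwargs, simple tools as a single positional string.
        tool_input: Any = kwargs if kwargs else (args[0] if args else "")
        result = self.wrapped_tool.invoke(tool_input)
        text = result if isinstance(result, str) else str(result)
        try:
            parsed = ast.literal_eval(text)  # dict/list payloads rendered as Python literals
        except (ValueError, SyntaxError):
            return result  # not structured data; pass the raw output through
        if isinstance(parsed, (dict, list)) and len(text) > self.size_threshold:
            return self.postprocess(parsed)
        return result


def wrap_tools(tools: list[BaseTool], postprocess: Callable[[Any], Any]) -> list[BaseTool]:
    """Wrap each tool exactly once, mirroring update_runnable_instance above."""
    return [
        tool
        if isinstance(tool, PostProcessedTool)
        else PostProcessedTool(
            name=tool.name,
            description=tool.description,
            wrapped_tool=tool,
            postprocess=postprocess,
        )
        for tool in tools
    ]

Compared with the removed component, this sketch drops the LLM-client extraction and the ALTK CodeGenerationComponent call and simply hands the parsed payload to whatever postprocess callable is supplied.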