lfx-nightly 0.1.13.dev9__py3-none-any.whl → 0.1.13.dev11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of lfx-nightly might be problematic.
- lfx/_assets/component_index.json +1 -1
- lfx/base/agents/agent.py +71 -40
- lfx/base/agents/events.py +102 -35
- lfx/components/models/embedding_model.py +11 -8
- {lfx_nightly-0.1.13.dev9.dist-info → lfx_nightly-0.1.13.dev11.dist-info}/METADATA +1 -1
- {lfx_nightly-0.1.13.dev9.dist-info → lfx_nightly-0.1.13.dev11.dist-info}/RECORD +8 -8
- {lfx_nightly-0.1.13.dev9.dist-info → lfx_nightly-0.1.13.dev11.dist-info}/WHEEL +0 -0
- {lfx_nightly-0.1.13.dev9.dist-info → lfx_nightly-0.1.13.dev11.dist-info}/entry_points.txt +0 -0
lfx/base/agents/agent.py
CHANGED
@@ -6,12 +6,12 @@ from typing import TYPE_CHECKING, cast
 from langchain.agents import AgentExecutor, BaseMultiActionAgent, BaseSingleActionAgent
 from langchain.agents.agent import RunnableAgent
 from langchain.callbacks.base import BaseCallbackHandler
-from langchain_core.messages import HumanMessage
+from langchain_core.messages import BaseMessage, HumanMessage
 from langchain_core.runnables import Runnable
 
 from lfx.base.agents.callback import AgentAsyncHandler
 from lfx.base.agents.events import ExceptionWithMessageError, process_agent_events
-from lfx.base.agents.utils import
+from lfx.base.agents.utils import get_chat_output_sender_name
 from lfx.custom.custom_component.component import Component, _get_component_toolkit
 from lfx.field_typing import Tool
 from lfx.inputs.inputs import InputTypes, MultilineInput
@@ -20,14 +20,13 @@ from lfx.log.logger import logger
 from lfx.memory import delete_message
 from lfx.schema.content_block import ContentBlock
 from lfx.schema.data import Data
+from lfx.schema.log import OnTokenFunctionType
 from lfx.schema.message import Message
 from lfx.template.field.base import Output
 from lfx.utils.constants import MESSAGE_SENDER_AI
 
 if TYPE_CHECKING:
-    from
-
-    from lfx.schema.log import SendMessageFunctionType
+    from lfx.schema.log import OnTokenFunctionType, SendMessageFunctionType
 
 
 DEFAULT_TOOLS_DESCRIPTION = "A helpful assistant with access to the following tools:"
@@ -126,6 +125,24 @@ class LCAgentComponent(Component):
         # might be overridden in subclasses
         return None
 
+    def _data_to_messages_skip_empty(self, data: list[Data]) -> list[BaseMessage]:
+        """Convert data to messages, filtering only empty text while preserving non-text content.
+
+        Note: added to fix issue with certain providers failing when given empty text as input.
+        """
+        messages = []
+        for value in data:
+            # Only skip if the message has a text attribute that is empty/whitespace
+            text = getattr(value, "text", None)
+            if isinstance(text, str) and not text.strip():
+                # Skip only messages with empty/whitespace-only text strings
+                continue
+
+            lc_message = value.to_lc_message()
+            messages.append(lc_message)
+
+        return messages
+
     async def run_agent(
         self,
         agent: Runnable | BaseSingleActionAgent | BaseMultiActionAgent | AgentExecutor,
@@ -145,57 +162,64 @@ class LCAgentComponent(Component):
             max_iterations=max_iterations,
         )
         # Convert input_value to proper format for agent
-
+        lc_message = None
+        if isinstance(self.input_value, Message):
             lc_message = self.input_value.to_lc_message()
-
+            input_dict: dict[str, str | list[BaseMessage] | BaseMessage] = {"input": lc_message}
         else:
-
-            input_text = self.input_value
+            input_dict = {"input": self.input_value}
 
-        input_dict: dict[str, str | list[BaseMessage]] = {}
         if hasattr(self, "system_prompt"):
             input_dict["system_prompt"] = self.system_prompt
-        if hasattr(self, "chat_history") and self.chat_history:
-            if (
-                hasattr(self.chat_history, "to_data")
-                and callable(self.chat_history.to_data)
-                and self.chat_history.__class__.__name__ == "Data"
-            ):
-                input_dict["chat_history"] = data_to_messages(self.chat_history)
-            # Handle both lfx.schema.message.Message and langflow.schema.message.Message types
-            if all(hasattr(m, "to_data") and callable(m.to_data) and "text" in m.data for m in self.chat_history):
-                input_dict["chat_history"] = data_to_messages(self.chat_history)
-            if all(isinstance(m, Message) for m in self.chat_history):
-                input_dict["chat_history"] = data_to_messages([m.to_data() for m in self.chat_history])
-        if hasattr(lc_message, "content") and isinstance(lc_message.content, list):
-            # ! Because the input has to be a string, we must pass the images in the chat_history
 
+        if hasattr(self, "chat_history") and self.chat_history:
+            if isinstance(self.chat_history, Data):
+                input_dict["chat_history"] = self._data_to_messages_skip_empty([self.chat_history])
+            elif all(hasattr(m, "to_data") and callable(m.to_data) and "text" in m.data for m in self.chat_history):
+                input_dict["chat_history"] = self._data_to_messages_skip_empty(self.chat_history)
+            elif all(isinstance(m, Message) for m in self.chat_history):
+                input_dict["chat_history"] = self._data_to_messages_skip_empty([m.to_data() for m in self.chat_history])
+
+        # Handle multimodal input (images + text)
+        # Note: Agent input must be a string, so we extract text and move images to chat_history
+        if lc_message is not None and hasattr(lc_message, "content") and isinstance(lc_message.content, list):
+            # Extract images and text from the text content items
            image_dicts = [item for item in lc_message.content if item.get("type") == "image"]
-
+            text_content = [item for item in lc_message.content if item.get("type") != "image"]
 
-
-
-
-
+            text_strings = [
+                item.get("text", "")
+                for item in text_content
+                if item.get("type") == "text" and item.get("text", "").strip()
+            ]
 
-            #
-
-
+            # Set input to concatenated text or empty string
+            input_dict["input"] = " ".join(text_strings) if text_strings else ""
+
+            # If input is still a list or empty, provide a default
+            if isinstance(input_dict["input"], list) or not input_dict["input"]:
+                input_dict["input"] = "Process the provided images."
 
            if "chat_history" not in input_dict:
                input_dict["chat_history"] = []
+
            if isinstance(input_dict["chat_history"], list):
                input_dict["chat_history"].extend(HumanMessage(content=[image_dict]) for image_dict in image_dicts)
            else:
                input_dict["chat_history"] = [HumanMessage(content=[image_dict]) for image_dict in image_dicts]
 
-        # Final safety check: ensure
-
-
-
-
-
-
+        # Final safety check: ensure input is never empty (prevents Anthropic API errors)
+        current_input = input_dict.get("input", "")
+        if isinstance(current_input, list):
+            current_input = " ".join(map(str, current_input))
+        elif not isinstance(current_input, str):
+            current_input = str(current_input)
+
+        if not current_input.strip():
+            input_dict["input"] = "Continue the conversation."
+        else:
+            input_dict["input"] = current_input
+
        if hasattr(self, "graph"):
            session_id = self.graph.session_id
        elif hasattr(self, "_session_id"):
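
The hunk above routes every chat_history shape through the new `_data_to_messages_skip_empty` helper instead of the old module-level `data_to_messages`, so history entries whose `text` is empty or whitespace-only are dropped before they reach the model provider. A minimal self-contained sketch of the same filtering idea (`FakeData` and its `to_lc_message` are illustrative stand-ins, not the real lfx.schema.data.Data):

from dataclasses import dataclass

from langchain_core.messages import BaseMessage, HumanMessage


@dataclass
class FakeData:
    """Illustrative stand-in for lfx.schema.data.Data."""

    text: str | None = None

    def to_lc_message(self) -> BaseMessage:
        return HumanMessage(content=self.text or "")


def messages_skip_empty(data: list[FakeData]) -> list[BaseMessage]:
    """Drop entries whose text is empty/whitespace; keep everything else (e.g. image payloads)."""
    messages = []
    for value in data:
        text = getattr(value, "text", None)
        if isinstance(text, str) and not text.strip():
            continue  # empty-text message: some providers reject these
        messages.append(value.to_lc_message())
    return messages


print(len(messages_skip_empty([FakeData("hi"), FakeData("   "), FakeData(None)])))  # 2
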
@@ -204,7 +228,6 @@ class LCAgentComponent(Component):
            session_id = None
 
        sender_name = get_chat_output_sender_name(self) or self.display_name or "AI"
-
        agent_message = Message(
            sender=MESSAGE_SENDER_AI,
            sender_name=sender_name,
@@ -212,6 +235,13 @@ class LCAgentComponent(Component):
            content_blocks=[ContentBlock(title="Agent Steps", contents=[])],
            session_id=session_id or uuid.uuid4(),
        )
+
+        # Create token callback if event_manager is available
+        # This wraps the event_manager's on_token method to match OnTokenFunctionType Protocol
+        on_token_callback: OnTokenFunctionType | None = None
+        if self._event_manager:
+            on_token_callback = cast("OnTokenFunctionType", self._event_manager.on_token)
+
        try:
            result = await process_agent_events(
                runnable.astream_events(
@@ -222,6 +252,7 @@ class LCAgentComponent(Component):
                ),
                agent_message,
                cast("SendMessageFunctionType", self.send_message),
+                on_token_callback,
            )
        except ExceptionWithMessageError as e:
            if hasattr(e, "agent_message") and hasattr(e.agent_message, "id"):
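
The final hunks wire a token callback into `process_agent_events`: when `self._event_manager` exists, its `on_token` method is cast to the `OnTokenFunctionType` Protocol and passed as an extra positional argument. A rough sketch of that structural-typing pattern (the `OnToken` Protocol and `EventManager` class below are simplified assumptions, not the actual lfx definitions):

from typing import Any, Protocol, cast


class OnToken(Protocol):
    """Simplified stand-in for lfx.schema.log.OnTokenFunctionType."""

    def __call__(self, data: dict[str, Any]) -> None: ...


class EventManager:
    """Hypothetical event manager with a synchronous on_token method."""

    def on_token(self, data: dict[str, Any]) -> None:
        print("token event:", data["chunk"])


manager: EventManager | None = EventManager()

# Mirrors the wiring in run_agent: the callback stays None when no manager exists,
# and the event processor treats None as "no token streaming".
on_token_callback: OnToken | None = None
if manager:
    on_token_callback = cast(OnToken, manager.on_token)

if on_token_callback:
    on_token_callback(data={"chunk": "Hello", "id": "msg-1"})
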
lfx/base/agents/events.py
CHANGED
@@ -1,4 +1,5 @@
 # Add helper functions for each event type
+import asyncio
 from collections.abc import AsyncIterator
 from time import perf_counter
 from typing import Any, Protocol
@@ -9,7 +10,7 @@ from typing_extensions import TypedDict
 
 from lfx.schema.content_block import ContentBlock
 from lfx.schema.content_types import TextContent, ToolContent
-from lfx.schema.log import SendMessageFunctionType
+from lfx.schema.log import OnTokenFunctionType, SendMessageFunctionType
 from lfx.schema.message import Message
 
 
@@ -53,7 +54,14 @@ def _calculate_duration(start_time: float) -> int:
 
 
 async def handle_on_chain_start(
-    event: dict[str, Any],
+    event: dict[str, Any],
+    agent_message: Message,
+    send_message_callback: SendMessageFunctionType,
+    send_token_callback: OnTokenFunctionType | None,  # noqa: ARG001
+    start_time: float,
+    *,
+    had_streaming: bool = False,  # noqa: ARG001
+    message_id: str | None = None,  # noqa: ARG001
 ) -> tuple[Message, float]:
    # Create content blocks if they don't exist
    if not agent_message.content_blocks:
@@ -80,7 +88,7 @@ async def handle_on_chain_start(
            header={"title": "Input", "icon": "MessageSquare"},
        )
        agent_message.content_blocks[0].contents.append(text_content)
-        agent_message = await
+        agent_message = await send_message_callback(message=agent_message, skip_db_update=True)
    start_time = perf_counter()
    return agent_message, start_time
 
@@ -101,15 +109,23 @@ def _extract_output_text(output: str | list) -> str:
        if isinstance(item, dict):
            if "text" in item:
                return item["text"] or ""
-
-
-
-
-
-
-
-
+            if "content" in item:
+                return str(item["content"])
+            if "message" in item:
+                return str(item["message"])
+
+            # Special case handling for non-text-like dicts
+            if (
+                item.get("type") == "tool_use"  # Handle tool use items
+                or ("index" in item and len(item) == 1)  # Handle index-only items
+                or "partial_json" in item  # Handle partial json items
+                # Handle index-only items
+                or ("index" in item and not any(k in item for k in ("text", "content", "message")))
+                # Handle other metadata-only chunks that don't contain meaningful text
+                or not any(key in item for key in ["text", "content", "message"])
+            ):
                return ""
+
            # For any other dict format, return empty string
            return ""
    # For any other single item type (not str or dict), return empty string
@@ -133,7 +149,14 @@ def _extract_output_text(output: str | list) -> str:
 
 
 async def handle_on_chain_end(
-    event: dict[str, Any],
+    event: dict[str, Any],
+    agent_message: Message,
+    send_message_callback: SendMessageFunctionType,
+    send_token_callback: OnTokenFunctionType | None,  # noqa: ARG001
+    start_time: float,
+    *,
+    had_streaming: bool = False,
+    message_id: str | None = None,  # noqa: ARG001
 ) -> tuple[Message, float]:
    data_output = event["data"].get("output")
    if data_output and isinstance(data_output, AgentFinish) and data_output.return_values.get("output"):
@@ -151,7 +174,11 @@ async def handle_on_chain_end(
            header={"title": "Output", "icon": "MessageSquare"},
        )
        agent_message.content_blocks[0].contents.append(text_content)
-
+
+    # Only send final message if we didn't have streaming chunks
+    # If we had streaming, frontend already accumulated the chunks
+    if not had_streaming:
+        agent_message = await send_message_callback(message=agent_message)
    start_time = perf_counter()
    return agent_message, start_time
 
@@ -160,7 +187,7 @@ async def handle_on_tool_start(
    event: dict[str, Any],
    agent_message: Message,
    tool_blocks_map: dict[str, ToolContent],
-
+    send_message_callback: SendMessageFunctionType,
    start_time: float,
 ) -> tuple[Message, float]:
    tool_name = event["name"]
@@ -190,7 +217,7 @@ async def handle_on_tool_start(
    tool_blocks_map[tool_key] = tool_content
    agent_message.content_blocks[0].contents.append(tool_content)
 
-    agent_message = await
+    agent_message = await send_message_callback(message=agent_message, skip_db_update=True)
    if agent_message.content_blocks and agent_message.content_blocks[0].contents:
        tool_blocks_map[tool_key] = agent_message.content_blocks[0].contents[-1]
    return agent_message, new_start_time
@@ -200,7 +227,7 @@ async def handle_on_tool_end(
    event: dict[str, Any],
    agent_message: Message,
    tool_blocks_map: dict[str, ToolContent],
-
+    send_message_callback: SendMessageFunctionType,
    start_time: float,
 ) -> tuple[Message, float]:
    run_id = event.get("run_id", "")
@@ -209,8 +236,8 @@ async def handle_on_tool_end(
    tool_content = tool_blocks_map.get(tool_key)
 
    if tool_content and isinstance(tool_content, ToolContent):
-        # Call
-        agent_message = await
+        # Call send_message_callback first to get the updated message structure
+        agent_message = await send_message_callback(message=agent_message, skip_db_update=True)
        new_start_time = perf_counter()
 
        # Now find and update the tool content in the current message
@@ -246,7 +273,7 @@ async def handle_on_tool_error(
    event: dict[str, Any],
    agent_message: Message,
    tool_blocks_map: dict[str, ToolContent],
-
+    send_message_callback: SendMessageFunctionType,
    start_time: float,
 ) -> tuple[Message, float]:
    run_id = event.get("run_id", "")
@@ -258,7 +285,7 @@ async def handle_on_tool_error(
        tool_content.error = event["data"].get("error", "Unknown error")
        tool_content.duration = _calculate_duration(start_time)
        tool_content.header = {"title": f"Error using **{tool_content.name}**", "icon": "Hammer"}
-        agent_message = await
+        agent_message = await send_message_callback(message=agent_message, skip_db_update=True)
    start_time = perf_counter()
    return agent_message, start_time
 
@@ -266,8 +293,12 @@ async def handle_on_tool_error(
 async def handle_on_chain_stream(
    event: dict[str, Any],
    agent_message: Message,
-
+    send_message_callback: SendMessageFunctionType,  # noqa: ARG001
+    send_token_callback: OnTokenFunctionType | None,
    start_time: float,
+    *,
+    had_streaming: bool = False,  # noqa: ARG001
+    message_id: str | None = None,
 ) -> tuple[Message, float]:
    data_chunk = event["data"].get("chunk", {})
    if isinstance(data_chunk, dict) and data_chunk.get("output"):
@@ -275,15 +306,26 @@ async def handle_on_chain_stream(
    if output and isinstance(output, str | list):
        agent_message.text = _extract_output_text(output)
        agent_message.properties.state = "complete"
-
+        # Don't call send_message_callback here - we must update in place
+        # in order to keep the message id consistent throughout the stream.
+        # The final message will be sent after the loop completes
        start_time = perf_counter()
    elif isinstance(data_chunk, AIMessageChunk):
        output_text = _extract_output_text(data_chunk.content)
-        if
-
-
-
+        # For streaming, send token event if callback is available
+        # Note: we should expect the callback, but we keep it optional for backwards compatibility
+        # as of v1.6.5
+        if output_text and output_text.strip() and send_token_callback and message_id:
+            await asyncio.to_thread(
+                send_token_callback,
+                data={
+                    "chunk": output_text,
+                    "id": str(message_id),
+                },
+            )
+
        if not agent_message.text:
+            # Starts the timer when the first message is starting to be generated
            start_time = perf_counter()
    return agent_message, start_time
 
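
In the streaming branch above, each non-empty chunk is handed to `send_token_callback` via `asyncio.to_thread`, so a synchronous callback runs on a worker thread instead of blocking the event loop between chunks. A self-contained sketch of the pattern (the callback body and chunk list are invented for illustration):

import asyncio
import time


def send_token(data: dict) -> None:
    """Stand-in for a synchronous token callback that does blocking I/O."""
    time.sleep(0.05)
    print("sent", data["chunk"])


async def stream_chunks() -> None:
    for chunk in ["Hel", "lo ", "wor", "ld"]:
        # Mirror the guard in handle_on_chain_stream: skip empty/whitespace chunks.
        if chunk.strip():
            # to_thread keeps the event loop responsive while the sync callback runs.
            await asyncio.to_thread(send_token, data={"chunk": chunk, "id": "msg-1"})


asyncio.run(stream_chunks())
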
@@ -294,7 +336,7 @@ class ToolEventHandler(Protocol):
        event: dict[str, Any],
        agent_message: Message,
        tool_blocks_map: dict[str, ContentBlock],
-
+        send_message_callback: SendMessageFunctionType,
        start_time: float,
    ) -> tuple[Message, float]: ...
 
@@ -304,8 +346,12 @@ class ChainEventHandler(Protocol):
        self,
        event: dict[str, Any],
        agent_message: Message,
-
+        send_message_callback: SendMessageFunctionType,
+        send_token_callback: OnTokenFunctionType | None,
        start_time: float,
+        *,
+        had_streaming: bool = False,
+        message_id: str | None = None,
    ) -> tuple[Message, float]: ...
 
 
@@ -329,7 +375,8 @@ TOOL_EVENT_HANDLERS: dict[str, ToolEventHandler] = {
 async def process_agent_events(
    agent_executor: AsyncIterator[dict[str, Any]],
    agent_message: Message,
-
+    send_message_callback: SendMessageFunctionType,
+    send_token_callback: OnTokenFunctionType | None = None,
 ) -> Message:
    """Process agent events and return the final output."""
    if isinstance(agent_message.properties, dict):
@@ -337,26 +384,46 @@ async def process_agent_events(
    else:
        agent_message.properties.icon = "Bot"
    agent_message.properties.state = "partial"
-    # Store the initial message
-    agent_message = await
+    # Store the initial message and capture the message id
+    agent_message = await send_message_callback(message=agent_message)
+    # Capture the original message id - this must stay consistent throughout if streaming
+    initial_message_id = agent_message.id
    try:
        # Create a mapping of run_ids to tool contents
        tool_blocks_map: dict[str, ToolContent] = {}
+        had_streaming = False
        start_time = perf_counter()
+
        async for event in agent_executor:
            if event["event"] in TOOL_EVENT_HANDLERS:
                tool_handler = TOOL_EVENT_HANDLERS[event["event"]]
                # Use skip_db_update=True during streaming to avoid DB round-trips
                agent_message, start_time = await tool_handler(
-                    event, agent_message, tool_blocks_map,
+                    event, agent_message, tool_blocks_map, send_message_callback, start_time
                )
            elif event["event"] in CHAIN_EVENT_HANDLERS:
                chain_handler = CHAIN_EVENT_HANDLERS[event["event"]]
-
-
+
+                # Check if this is a streaming event
+                if event["event"] in ("on_chain_stream", "on_chat_model_stream"):
+                    had_streaming = True
+                    agent_message, start_time = await chain_handler(
+                        event,
+                        agent_message,
+                        send_message_callback,
+                        send_token_callback,
+                        start_time,
+                        had_streaming=had_streaming,
+                        message_id=initial_message_id,
+                    )
+                else:
+                    agent_message, start_time = await chain_handler(
+                        event, agent_message, send_message_callback, None, start_time, had_streaming=had_streaming
+                    )
+
        agent_message.properties.state = "complete"
        # Final DB update with the complete message (skip_db_update=False by default)
-        agent_message = await
+        agent_message = await send_message_callback(message=agent_message)
    except Exception as e:
        raise ExceptionWithMessageError(agent_message, str(e)) from e
    return await Message.create(**agent_message.model_dump())
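
`process_agent_events` now tracks a `had_streaming` flag: it flips on the first `on_chain_stream` or `on_chat_model_stream` event, and `handle_on_chain_end` then skips its final `send_message_callback` so the frontend's accumulated chunks are not sent twice. A compressed sketch of that control flow (event shapes and print statements are placeholders):

import asyncio
from typing import Any

STREAM_EVENTS = ("on_chain_stream", "on_chat_model_stream")


async def process(events: list[dict[str, Any]]) -> None:
    had_streaming = False
    for event in events:
        if event["event"] in STREAM_EVENTS:
            had_streaming = True
            print("stream chunk:", event.get("chunk"))
    # Mirrors handle_on_chain_end: resend the full message only when
    # nothing was streamed incrementally (otherwise the UI already has it).
    if not had_streaming:
        print("send final message")


asyncio.run(process([{"event": "on_chain_stream", "chunk": "hi"}]))
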
lfx/components/models/embedding_model.py
CHANGED

@@ -31,7 +31,7 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
        DropdownInput(
            name="provider",
            display_name="Model Provider",
-            options=["OpenAI", "Ollama", "
+            options=["OpenAI", "Ollama", "IBM watsonx.ai"],
            value="OpenAI",
            info="Select the embedding model provider",
            real_time_refresh=True,
@@ -62,7 +62,7 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
        MessageTextInput(
            name="project_id",
            display_name="Project ID",
-            info="
+            info="IBM watsonx.ai Project ID (required for IBM watsonx.ai)",
            show=False,
        ),
        IntInput(
@@ -128,7 +128,7 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
            **model_kwargs,
        )
 
-        if provider == "
+        if provider == "IBM watsonx.ai":
            try:
                from langchain_ibm import WatsonxEmbeddings
            except ImportError:
@@ -136,13 +136,13 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
                raise ImportError(msg) from None
 
            if not api_key:
-                msg = "
+                msg = "IBM watsonx.ai API key is required when using IBM watsonx.ai provider"
                raise ValueError(msg)
 
            project_id = self.project_id
 
            if not project_id:
-                msg = "Project ID is required for
+                msg = "Project ID is required for IBM watsonx.ai provider"
                raise ValueError(msg)
 
            params = {
@@ -167,6 +167,7 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
            build_config["api_key"]["required"] = True
            build_config["api_key"]["show"] = True
            build_config["api_base"]["display_name"] = "OpenAI API Base URL"
+            build_config["api_base"]["advanced"] = True
            build_config["project_id"]["show"] = False
 
        elif field_value == "Ollama":
@@ -177,16 +178,18 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
            build_config["api_key"]["show"] = False
            build_config["api_base"]["display_name"] = "Ollama Base URL"
            build_config["api_base"]["value"] = "http://localhost:11434"
+            build_config["api_base"]["advanced"] = True
            build_config["project_id"]["show"] = False
 
-        elif field_value == "
+        elif field_value == "IBM watsonx.ai":
            build_config["model"]["options"] = WATSONX_EMBEDDING_MODEL_NAMES
            build_config["model"]["value"] = WATSONX_EMBEDDING_MODEL_NAMES[0]
-            build_config["api_key"]["display_name"] = "
+            build_config["api_key"]["display_name"] = "IBM watsonx.ai API Key"
            build_config["api_key"]["required"] = True
            build_config["api_key"]["show"] = True
-            build_config["api_base"]["display_name"] = "
+            build_config["api_base"]["display_name"] = "IBM watsonx.ai URL"
            build_config["api_base"]["value"] = "https://us-south.ml.cloud.ibm.com"
+            build_config["api_base"]["advanced"] = False
            build_config["project_id"]["show"] = True
 
        return build_config
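
The `update_build_config` changes amount to per-provider field toggling: `api_base` becomes an advanced (hidden-by-default) field for OpenAI and Ollama but stays visible with the IBM default URL for IBM watsonx.ai, which is also the only provider that shows `project_id`. A minimal sketch of the toggle pattern (the standalone function signature is simplified from the component method):

def update_build_config(build_config: dict, field_value: str) -> dict:
    """Toggle provider-specific fields; simplified from the component method."""
    if field_value == "IBM watsonx.ai":
        build_config["api_base"]["display_name"] = "IBM watsonx.ai URL"
        build_config["api_base"]["value"] = "https://us-south.ml.cloud.ibm.com"
        build_config["api_base"]["advanced"] = False  # keep visible in the basic view
        build_config["project_id"]["show"] = True
    else:  # OpenAI / Ollama
        build_config["api_base"]["advanced"] = True  # tuck away as an advanced option
        build_config["project_id"]["show"] = False
    return build_config


config = {"api_base": {}, "project_id": {}}
print(update_build_config(config, "IBM watsonx.ai")["project_id"]["show"])  # True
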
{lfx_nightly-0.1.13.dev9.dist-info → lfx_nightly-0.1.13.dev11.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lfx-nightly
-Version: 0.1.13.dev9
+Version: 0.1.13.dev11
 Summary: Langflow Executor - A lightweight CLI tool for executing and serving Langflow AI flows
 Author-email: Gabriel Luiz Freitas Almeida <gabriel@langflow.org>
 Requires-Python: <3.14,>=3.10
{lfx_nightly-0.1.13.dev9.dist-info → lfx_nightly-0.1.13.dev11.dist-info}/RECORD
CHANGED

@@ -4,16 +4,16 @@ lfx/constants.py,sha256=Ert_SpwXhutgcTKEvtDArtkONXgyE5x68opMoQfukMA,203
 lfx/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lfx/settings.py,sha256=wnx4zkOLQ8mvampYsnnvVV9GvEnRUuWQpKFSbFTCIp4,181
 lfx/type_extraction.py,sha256=eCZNl9nAQivKdaPv_9BK71N0JV9Rtr--veAht0dnQ4A,2921
-lfx/_assets/component_index.json,sha256=
+lfx/_assets/component_index.json,sha256=5u7MEm93Yt_-xBeIa5pRTGAor8cRFcUMetsV9O5Q2HY,3572648
 lfx/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lfx/base/constants.py,sha256=v9vo0Ifg8RxDu__XqgGzIXHlsnUFyWM-SSux0uHHoz8,1187
 lfx/base/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lfx/base/agents/agent.py,sha256=
+lfx/base/agents/agent.py,sha256=uKp5OSNxGbKa-fyZLfHl68f1ZntS49LFLNN49OGcnnY,15199
 lfx/base/agents/callback.py,sha256=mjlT9ukBMVrfjYrHsJowqpY4g9hVGBVBIYhncLWr3tQ,3692
 lfx/base/agents/context.py,sha256=u0wboX1aRR22Ia8gY14WF12RjhE0Rxv9hPBiixT9DtQ,3916
 lfx/base/agents/default_prompts.py,sha256=tUjfczwt4D5R1KozNOl1uSL2V2rSCZeUMS-cfV4Gwn0,955
 lfx/base/agents/errors.py,sha256=4QY1AqSWZaOjq-iQRYH_aeCfH_hWECLQkiwybNXz66U,531
-lfx/base/agents/events.py,sha256=
+lfx/base/agents/events.py,sha256=wk5sEsAkFbZxx_UUYYSvzWwocmAQZzfFlxKVAb_QSQ8,17170
 lfx/base/agents/utils.py,sha256=VEAVYC6oOadjKeZ-cUS-1OOCnWW69FhpcGWzjvR4uZ8,7161
 lfx/base/agents/crewai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lfx/base/agents/crewai/crew.py,sha256=TN1JyLXMpJc2yPH3tokhFmxKKYoJ4lMvmG19DmpKfeY,7953
@@ -416,7 +416,7 @@ lfx/components/mistral/__init__.py,sha256=EABXqA45Tz50vZRmhEisbIIPEcRCvV9j-Y9Hf2
 lfx/components/mistral/mistral.py,sha256=4heAlIFEeq_ljUZDPpNGyK_VRkWjwCfPbBaQK1mV4NY,3718
 lfx/components/mistral/mistral_embeddings.py,sha256=QuqS_S3yHWCacs-Nc3qalpUsb-OACRWFZenUtCD_rLQ,1963
 lfx/components/models/__init__.py,sha256=hhfj70MkcRATzAjJnntAg1A4E7kHlQn8GT0bizkB7L4,1113
-lfx/components/models/embedding_model.py,sha256=
+lfx/components/models/embedding_model.py,sha256=Q38BxC4xklLOAxS1Vam0EGP0g6cu0Dm7-uf679XImAc,7795
 lfx/components/models/language_model.py,sha256=TA24DMAXrlruY3mNsfI9qGltfQ2h9cQpxe8DW8HLdeE,5992
 lfx/components/mongodb/__init__.py,sha256=nFOQgiIvDnWGiWDSqZ0ERQme5DpA-cQgzybUiqXQtGw,953
 lfx/components/mongodb/mongodb_atlas.py,sha256=OlAstNMToHuvGI-8djkiGr7kdGBr927O0SE5cnVd0O0,8594
@@ -730,7 +730,7 @@ lfx/utils/schemas.py,sha256=NbOtVQBrn4d0BAu-0H_eCTZI2CXkKZlRY37XCSmuJwc,3865
 lfx/utils/util.py,sha256=Ww85wbr1-vjh2pXVtmTqoUVr6MXAW8S7eDx_Ys6HpE8,20696
 lfx/utils/util_strings.py,sha256=nU_IcdphNaj6bAPbjeL-c1cInQPfTBit8mp5Y57lwQk,1686
 lfx/utils/version.py,sha256=cHpbO0OJD2JQAvVaTH_6ibYeFbHJV0QDHs_YXXZ-bT8,671
-lfx_nightly-0.1.13.dev9.dist-info/METADATA,sha256=
-lfx_nightly-0.1.13.dev9.dist-info/WHEEL,sha256=
-lfx_nightly-0.1.13.dev9.dist-info/entry_points.txt,sha256=
-lfx_nightly-0.1.13.dev9.dist-info/RECORD,,
+lfx_nightly-0.1.13.dev11.dist-info/METADATA,sha256=dherVA7JNlTrOnZxoaISknodoQzPGMe-n4Q3Szv43bw,8290
+lfx_nightly-0.1.13.dev11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+lfx_nightly-0.1.13.dev11.dist-info/entry_points.txt,sha256=1724p3RHDQRT2CKx_QRzEIa7sFuSVO0Ux70YfXfoMT4,42
+lfx_nightly-0.1.13.dev11.dist-info/RECORD,,

{lfx_nightly-0.1.13.dev9.dist-info → lfx_nightly-0.1.13.dev11.dist-info}/WHEEL
File without changes

{lfx_nightly-0.1.13.dev9.dist-info → lfx_nightly-0.1.13.dev11.dist-info}/entry_points.txt
File without changes
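
For anyone spot-checking the RECORD entries above: each line has the form `path,sha256=<digest>,<size>`, where the digest is the urlsafe-base64 SHA-256 of the file with `=` padding stripped (the standard wheel RECORD convention). A small sketch for recomputing one (the file path is a placeholder):

import base64
import hashlib
from pathlib import Path


def record_hash(path: str) -> str:
    """Compute a wheel-RECORD style digest: urlsafe base64 SHA-256, '=' padding stripped."""
    digest = hashlib.sha256(Path(path).read_bytes()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()


# An empty file hashes to 47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
# matching the zero-length entries (e.g. lfx/py.typed) in the RECORD above.
print(record_hash("some_file"))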