lfx-nightly 0.2.0.dev41__py3-none-any.whl → 0.3.0.dev3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lfx/__main__.py +137 -6
- lfx/_assets/component_index.json +1 -1
- lfx/base/agents/agent.py +10 -6
- lfx/base/agents/altk_base_agent.py +5 -3
- lfx/base/agents/altk_tool_wrappers.py +1 -1
- lfx/base/agents/events.py +1 -1
- lfx/base/agents/utils.py +4 -0
- lfx/base/composio/composio_base.py +78 -41
- lfx/base/data/cloud_storage_utils.py +156 -0
- lfx/base/data/docling_utils.py +130 -55
- lfx/base/datastax/astradb_base.py +75 -64
- lfx/base/embeddings/embeddings_class.py +113 -0
- lfx/base/models/__init__.py +11 -1
- lfx/base/models/google_generative_ai_constants.py +33 -9
- lfx/base/models/model_metadata.py +6 -0
- lfx/base/models/ollama_constants.py +196 -30
- lfx/base/models/openai_constants.py +37 -10
- lfx/base/models/unified_models.py +1123 -0
- lfx/base/models/watsonx_constants.py +43 -4
- lfx/base/prompts/api_utils.py +40 -5
- lfx/base/tools/component_tool.py +2 -9
- lfx/cli/__init__.py +10 -2
- lfx/cli/commands.py +3 -0
- lfx/cli/run.py +65 -409
- lfx/cli/script_loader.py +18 -7
- lfx/cli/validation.py +6 -3
- lfx/components/__init__.py +0 -3
- lfx/components/composio/github_composio.py +1 -1
- lfx/components/cuga/cuga_agent.py +39 -27
- lfx/components/data_source/api_request.py +4 -2
- lfx/components/datastax/astradb_assistant_manager.py +4 -2
- lfx/components/docling/__init__.py +45 -11
- lfx/components/docling/docling_inline.py +39 -49
- lfx/components/docling/docling_remote.py +1 -0
- lfx/components/elastic/opensearch_multimodal.py +1733 -0
- lfx/components/files_and_knowledge/file.py +384 -36
- lfx/components/files_and_knowledge/ingestion.py +8 -0
- lfx/components/files_and_knowledge/retrieval.py +10 -0
- lfx/components/files_and_knowledge/save_file.py +91 -88
- lfx/components/langchain_utilities/ibm_granite_handler.py +211 -0
- lfx/components/langchain_utilities/tool_calling.py +37 -6
- lfx/components/llm_operations/batch_run.py +64 -18
- lfx/components/llm_operations/lambda_filter.py +213 -101
- lfx/components/llm_operations/llm_conditional_router.py +39 -7
- lfx/components/llm_operations/structured_output.py +38 -12
- lfx/components/models/__init__.py +16 -74
- lfx/components/models_and_agents/agent.py +51 -203
- lfx/components/models_and_agents/embedding_model.py +171 -255
- lfx/components/models_and_agents/language_model.py +54 -318
- lfx/components/models_and_agents/mcp_component.py +96 -10
- lfx/components/models_and_agents/prompt.py +105 -18
- lfx/components/ollama/ollama_embeddings.py +111 -29
- lfx/components/openai/openai_chat_model.py +1 -1
- lfx/components/processing/text_operations.py +580 -0
- lfx/components/vllm/__init__.py +37 -0
- lfx/components/vllm/vllm.py +141 -0
- lfx/components/vllm/vllm_embeddings.py +110 -0
- lfx/custom/custom_component/component.py +65 -10
- lfx/custom/custom_component/custom_component.py +8 -6
- lfx/events/observability/__init__.py +0 -0
- lfx/events/observability/lifecycle_events.py +111 -0
- lfx/field_typing/__init__.py +57 -58
- lfx/graph/graph/base.py +40 -1
- lfx/graph/utils.py +109 -30
- lfx/graph/vertex/base.py +75 -23
- lfx/graph/vertex/vertex_types.py +0 -5
- lfx/inputs/__init__.py +2 -0
- lfx/inputs/input_mixin.py +55 -0
- lfx/inputs/inputs.py +120 -0
- lfx/interface/components.py +24 -7
- lfx/interface/initialize/loading.py +42 -12
- lfx/io/__init__.py +2 -0
- lfx/run/__init__.py +5 -0
- lfx/run/base.py +464 -0
- lfx/schema/__init__.py +50 -0
- lfx/schema/data.py +1 -1
- lfx/schema/image.py +26 -7
- lfx/schema/message.py +104 -11
- lfx/schema/workflow.py +171 -0
- lfx/services/deps.py +12 -0
- lfx/services/interfaces.py +43 -1
- lfx/services/mcp_composer/service.py +7 -1
- lfx/services/schema.py +1 -0
- lfx/services/settings/auth.py +95 -4
- lfx/services/settings/base.py +11 -1
- lfx/services/settings/constants.py +2 -0
- lfx/services/settings/utils.py +82 -0
- lfx/services/storage/local.py +13 -8
- lfx/services/transaction/__init__.py +5 -0
- lfx/services/transaction/service.py +35 -0
- lfx/tests/unit/components/__init__.py +0 -0
- lfx/utils/constants.py +2 -0
- lfx/utils/mustache_security.py +79 -0
- lfx/utils/validate_cloud.py +81 -3
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/METADATA +7 -2
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/RECORD +98 -80
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/WHEEL +0 -0
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/entry_points.txt +0 -0
|
from typing import Any

from langchain_openai import ChatOpenAI
from pydantic.v1 import SecretStr

from lfx.base.models.model import LCModelComponent
from lfx.field_typing import LanguageModel
from lfx.field_typing.range_spec import RangeSpec
from lfx.inputs.inputs import BoolInput, DictInput, IntInput, SecretStrInput, SliderInput, StrInput
from lfx.log.logger import logger


class VllmComponent(LCModelComponent):
    """Language-model component backed by a vLLM server's OpenAI-compatible API.

    Builds a ``ChatOpenAI`` client pointed at the configured vLLM base URL.
    The API key is optional because local vLLM servers typically do not
    require authentication.
    """

    display_name = "vLLM"
    description = "Generates text using vLLM models via OpenAI-compatible API."
    icon = "vLLM"
    name = "vLLMModel"

    inputs = [
        *LCModelComponent.get_base_inputs(),
        IntInput(
            name="max_tokens",
            display_name="Max Tokens",
            advanced=True,
            info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
            range_spec=RangeSpec(min=0, max=128000),
        ),
        DictInput(
            name="model_kwargs",
            display_name="Model Kwargs",
            advanced=True,
            info="Additional keyword arguments to pass to the model.",
        ),
        BoolInput(
            name="json_mode",
            display_name="JSON Mode",
            advanced=True,
            info="If True, it will output JSON regardless of passing a schema.",
        ),
        StrInput(
            name="model_name",
            display_name="Model Name",
            advanced=False,
            info="The name of the vLLM model to use (e.g., 'ibm-granite/granite-3.3-8b-instruct').",
            value="ibm-granite/granite-3.3-8b-instruct",
        ),
        StrInput(
            name="api_base",
            display_name="vLLM API Base",
            advanced=False,
            info="The base URL of the vLLM API server. Defaults to http://localhost:8000/v1 for local vLLM server.",
            value="http://localhost:8000/v1",
        ),
        SecretStrInput(
            name="api_key",
            display_name="API Key",
            info="The API Key to use for the vLLM model (optional for local servers).",
            advanced=False,
            value="",
            required=False,
        ),
        SliderInput(
            name="temperature",
            display_name="Temperature",
            value=0.1,
            range_spec=RangeSpec(min=0, max=1, step=0.01),
            show=True,
        ),
        IntInput(
            name="seed",
            display_name="Seed",
            info="Controls the reproducibility of the job. Set to -1 to disable (some providers may not support).",
            advanced=True,
            value=-1,
            required=False,
        ),
        IntInput(
            name="max_retries",
            display_name="Max Retries",
            info="Max retries when generating. Set to -1 to disable (some providers may not support).",
            advanced=True,
            value=-1,
            required=False,
        ),
        IntInput(
            name="timeout",
            display_name="Timeout",
            info="Timeout for requests to vLLM completion API. Set to -1 to disable (some providers may not support).",
            advanced=True,
            value=-1,
            required=False,
        ),
    ]

    def build_model(self) -> LanguageModel:  # type: ignore[type-var]
        """Build and return the ``ChatOpenAI`` client for the configured vLLM server.

        Returns:
            LanguageModel: The chat model; when ``json_mode`` is enabled the model
            is bound with ``response_format={"type": "json_object"}``.
        """
        logger.debug(f"Executing request with vLLM model: {self.model_name}")
        parameters = {
            # Wrap/unwrap keeps the key a plain string regardless of input type;
            # None signals "no auth" for local servers.
            "api_key": SecretStr(self.api_key).get_secret_value() if self.api_key else None,
            "model_name": self.model_name,
            # 0 means "unlimited" in the UI, which maps to None for the client.
            "max_tokens": self.max_tokens or None,
            "model_kwargs": self.model_kwargs or {},
            "base_url": self.api_base or "http://localhost:8000/v1",
            "temperature": self.temperature if self.temperature is not None else 0.1,
        }

        # Only add optional parameters if explicitly set (-1 means "use provider default")
        if self.seed is not None and self.seed != -1:
            parameters["seed"] = self.seed
        if self.timeout is not None and self.timeout != -1:
            parameters["timeout"] = self.timeout
        if self.max_retries is not None and self.max_retries != -1:
            parameters["max_retries"] = self.max_retries

        output = ChatOpenAI(**parameters)
        if self.json_mode:
            output = output.bind(response_format={"type": "json_object"})

        return output

    def _get_exception_message(self, e: Exception):
        """Get a message from a vLLM exception.

        Args:
            e (Exception): The exception to get the message from.

        Returns:
            str | None: The message from the exception, or None if unavailable.
        """
        try:
            from openai import BadRequestError
        except ImportError:
            return None
        if isinstance(e, BadRequestError):
            # BadRequestError.body may be None or a non-dict payload depending on
            # the server; the previous unconditional .get() raised AttributeError
            # inside the error-reporting path in that case.
            body = getattr(e, "body", None)
            if isinstance(body, dict):
                message = body.get("message")
                if message:
                    return message
        return None

    def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:  # noqa: ARG002
        # vLLM models support all parameters, so no special handling needed
        return build_config
|
from langchain_openai import OpenAIEmbeddings

from lfx.base.embeddings.model import LCEmbeddingsModel
from lfx.field_typing import Embeddings
from lfx.io import BoolInput, DictInput, FloatInput, IntInput, MessageTextInput, SecretStrInput


class VllmEmbeddingsComponent(LCEmbeddingsModel):
    """Embeddings component backed by a vLLM server's OpenAI-compatible API.

    Builds an ``OpenAIEmbeddings`` client pointed at the configured vLLM base
    URL. The API key is optional because local vLLM servers typically do not
    require authentication.
    """

    display_name = "vLLM Embeddings"
    description = "Generate embeddings using vLLM models via OpenAI-compatible API."
    icon = "vLLM"
    name = "vLLMEmbeddings"

    inputs = [
        MessageTextInput(
            name="model_name",
            display_name="Model Name",
            advanced=False,
            info="The name of the vLLM embeddings model to use (e.g., 'BAAI/bge-large-en-v1.5').",
            value="BAAI/bge-large-en-v1.5",
        ),
        MessageTextInput(
            name="api_base",
            display_name="vLLM API Base",
            advanced=False,
            info="The base URL of the vLLM API server. Defaults to http://localhost:8000/v1 for local vLLM server.",
            value="http://localhost:8000/v1",
        ),
        SecretStrInput(
            name="api_key",
            display_name="API Key",
            info="The API Key to use for the vLLM model (optional for local servers).",
            advanced=False,
            value="",
            required=False,
        ),
        IntInput(
            name="dimensions",
            display_name="Dimensions",
            info="The number of dimensions the resulting output embeddings should have. "
            "Only supported by certain models.",
            advanced=True,
        ),
        IntInput(
            name="chunk_size",
            display_name="Chunk Size",
            advanced=True,
            value=1000,
            info="The chunk size to use when processing documents.",
        ),
        IntInput(
            name="max_retries",
            display_name="Max Retries",
            value=3,
            advanced=True,
            info="Maximum number of retries for failed requests.",
        ),
        FloatInput(
            name="request_timeout",
            display_name="Request Timeout",
            advanced=True,
            info="Timeout for requests to vLLM API in seconds.",
        ),
        BoolInput(
            name="show_progress_bar",
            display_name="Show Progress Bar",
            advanced=True,
            info="Whether to show a progress bar when processing multiple documents.",
        ),
        BoolInput(
            name="skip_empty",
            display_name="Skip Empty",
            advanced=True,
            info="Whether to skip empty documents.",
        ),
        DictInput(
            name="model_kwargs",
            display_name="Model Kwargs",
            advanced=True,
            info="Additional keyword arguments to pass to the model.",
        ),
        DictInput(
            name="default_headers",
            display_name="Default Headers",
            advanced=True,
            info="Default headers to use for the API request.",
        ),
        DictInput(
            name="default_query",
            display_name="Default Query",
            advanced=True,
            info="Default query parameters to use for the API request.",
        ),
    ]

    def build_embeddings(self) -> Embeddings:
        """Build and return the ``OpenAIEmbeddings`` client for the configured server.

        Returns:
            Embeddings: The configured embeddings client. Unset optional fields
            are normalized to ``None`` (or ``{}`` for ``model_kwargs``) so the
            client falls back to its own defaults.
        """
        return OpenAIEmbeddings(
            model=self.model_name,
            base_url=self.api_base or "http://localhost:8000/v1",
            api_key=self.api_key or None,
            dimensions=self.dimensions or None,
            chunk_size=self.chunk_size,
            max_retries=self.max_retries,
            timeout=self.request_timeout or None,
            show_progress_bar=self.show_progress_bar,
            skip_empty=self.skip_empty,
            # Guard against an unset DictInput: OpenAIEmbeddings expects a dict,
            # and this matches the `or {}` convention used by VllmComponent.
            model_kwargs=self.model_kwargs or {},
            default_headers=self.default_headers or None,
            default_query=self.default_query or None,
        )
@@ -1565,7 +1565,24 @@ class Component(CustomComponent):
|
|
|
1565
1565
|
return has_chat_input(self.graph.get_vertex_neighbors(self._vertex))
|
|
1566
1566
|
|
|
1567
1567
|
def _should_skip_message(self, message: Message) -> bool:
|
|
1568
|
-
"""Check if the message should be skipped based on vertex configuration and message type.
|
|
1568
|
+
"""Check if the message should be skipped based on vertex configuration and message type.
|
|
1569
|
+
|
|
1570
|
+
When a message is skipped:
|
|
1571
|
+
- It is NOT stored in the database
|
|
1572
|
+
- It will NOT have an ID (message.get_id() will return None)
|
|
1573
|
+
- It is still returned to the caller, but no events are sent to the frontend
|
|
1574
|
+
|
|
1575
|
+
Messages are skipped when:
|
|
1576
|
+
- The component is not an input or output vertex
|
|
1577
|
+
- The component is not connected to a Chat Output
|
|
1578
|
+
- The message is not an ErrorMessage
|
|
1579
|
+
|
|
1580
|
+
This prevents intermediate components from cluttering the database with messages
|
|
1581
|
+
that aren't meant to be displayed in the chat UI.
|
|
1582
|
+
|
|
1583
|
+
Returns:
|
|
1584
|
+
bool: True if the message should be skipped, False otherwise
|
|
1585
|
+
"""
|
|
1569
1586
|
return (
|
|
1570
1587
|
self._vertex is not None
|
|
1571
1588
|
and not (self._vertex.is_output or self._vertex.is_input)
|
|
@@ -1603,12 +1620,31 @@ class Component(CustomComponent):
|
|
|
1603
1620
|
async def send_message(self, message: Message, id_: str | None = None, *, skip_db_update: bool = False):
|
|
1604
1621
|
"""Send a message with optional database update control.
|
|
1605
1622
|
|
|
1623
|
+
This is the central method for sending messages in Langflow. It handles:
|
|
1624
|
+
- Message storage in the database (unless skipped)
|
|
1625
|
+
- Event emission to the frontend
|
|
1626
|
+
- Streaming support
|
|
1627
|
+
- Error handling and cleanup
|
|
1628
|
+
|
|
1629
|
+
Message ID Rules:
|
|
1630
|
+
- Messages only have an ID after being stored in the database
|
|
1631
|
+
- If _should_skip_message() returns True, the message is not stored and will not have an ID
|
|
1632
|
+
- Always use message.get_id() or message.has_id() to safely check for ID existence
|
|
1633
|
+
- Never access message.id directly without checking if it exists first
|
|
1634
|
+
|
|
1606
1635
|
Args:
|
|
1607
1636
|
message: The message to send
|
|
1608
|
-
id_: Optional message ID
|
|
1637
|
+
id_: Optional message ID (used for event emission, not database storage)
|
|
1609
1638
|
skip_db_update: If True, only update in-memory and send event, skip DB write.
|
|
1610
1639
|
Useful during streaming to avoid excessive DB round-trips.
|
|
1611
|
-
Note:
|
|
1640
|
+
Note: When skip_db_update=True, the message must already have an ID
|
|
1641
|
+
(i.e., it must have been stored previously).
|
|
1642
|
+
|
|
1643
|
+
Returns:
|
|
1644
|
+
Message: The stored message (with ID if stored in database, without ID if skipped)
|
|
1645
|
+
|
|
1646
|
+
Raises:
|
|
1647
|
+
ValueError: If skip_db_update=True but message doesn't have an ID
|
|
1612
1648
|
"""
|
|
1613
1649
|
if self._should_skip_message(message):
|
|
1614
1650
|
return message
|
|
@@ -1621,10 +1657,18 @@ class Component(CustomComponent):
|
|
|
1621
1657
|
|
|
1622
1658
|
# If skip_db_update is True and message already has an ID, skip the DB write
|
|
1623
1659
|
# This path is used during agent streaming to avoid excessive DB round-trips
|
|
1624
|
-
|
|
1660
|
+
# When skip_db_update=True, we require the message to already have an ID
|
|
1661
|
+
# because we're updating an existing message, not creating a new one
|
|
1662
|
+
if skip_db_update:
|
|
1663
|
+
if not message.has_id():
|
|
1664
|
+
msg = (
|
|
1665
|
+
"skip_db_update=True requires the message to already have an ID. "
|
|
1666
|
+
"The message must have been stored in the database previously."
|
|
1667
|
+
)
|
|
1668
|
+
raise ValueError(msg)
|
|
1625
1669
|
# Create a fresh Message instance for consistency with normal flow
|
|
1626
1670
|
stored_message = await Message.create(**message.model_dump())
|
|
1627
|
-
self._stored_message_id = stored_message.
|
|
1671
|
+
self._stored_message_id = stored_message.get_id()
|
|
1628
1672
|
# Still send the event to update the client in real-time
|
|
1629
1673
|
# Note: If this fails, we don't need DB cleanup since we didn't write to DB
|
|
1630
1674
|
await self._send_message_event(stored_message, id_=id_)
|
|
@@ -1632,7 +1676,9 @@ class Component(CustomComponent):
|
|
|
1632
1676
|
# Normal flow: store/update in database
|
|
1633
1677
|
stored_message = await self._store_message(message)
|
|
1634
1678
|
|
|
1635
|
-
|
|
1679
|
+
# After _store_message, the message should always have an ID
|
|
1680
|
+
# but we use get_id() for safety
|
|
1681
|
+
self._stored_message_id = stored_message.get_id()
|
|
1636
1682
|
try:
|
|
1637
1683
|
complete_message = ""
|
|
1638
1684
|
if (
|
|
@@ -1653,7 +1699,10 @@ class Component(CustomComponent):
|
|
|
1653
1699
|
await self._send_message_event(stored_message, id_=id_)
|
|
1654
1700
|
except Exception:
|
|
1655
1701
|
# remove the message from the database
|
|
1656
|
-
|
|
1702
|
+
# Only delete if the message has an ID
|
|
1703
|
+
message_id = stored_message.get_id()
|
|
1704
|
+
if message_id:
|
|
1705
|
+
await delete_message(id_=message_id)
|
|
1657
1706
|
raise
|
|
1658
1707
|
self.status = stored_message
|
|
1659
1708
|
return stored_message
|
|
@@ -1699,7 +1748,7 @@ class Component(CustomComponent):
|
|
|
1699
1748
|
return bool(
|
|
1700
1749
|
hasattr(self, "_event_manager")
|
|
1701
1750
|
and self._event_manager
|
|
1702
|
-
and stored_message.
|
|
1751
|
+
and stored_message.has_id()
|
|
1703
1752
|
and not isinstance(original_message.text, str)
|
|
1704
1753
|
)
|
|
1705
1754
|
|
|
@@ -1726,14 +1775,20 @@ class Component(CustomComponent):
|
|
|
1726
1775
|
msg = "The message must be an iterator or an async iterator."
|
|
1727
1776
|
raise TypeError(msg)
|
|
1728
1777
|
|
|
1778
|
+
# Get message ID safely - streaming requires an ID
|
|
1779
|
+
message_id = message.get_id()
|
|
1780
|
+
if not message_id:
|
|
1781
|
+
msg = "Message must have an ID to stream. Messages only have IDs after being stored in the database."
|
|
1782
|
+
raise ValueError(msg)
|
|
1783
|
+
|
|
1729
1784
|
if isinstance(iterator, AsyncIterator):
|
|
1730
|
-
return await self._handle_async_iterator(iterator,
|
|
1785
|
+
return await self._handle_async_iterator(iterator, message_id, message)
|
|
1731
1786
|
try:
|
|
1732
1787
|
complete_message = ""
|
|
1733
1788
|
first_chunk = True
|
|
1734
1789
|
for chunk in iterator:
|
|
1735
1790
|
complete_message = await self._process_chunk(
|
|
1736
|
-
chunk.content, complete_message,
|
|
1791
|
+
chunk.content, complete_message, message_id, message, first_chunk=first_chunk
|
|
1737
1792
|
)
|
|
1738
1793
|
first_chunk = False
|
|
1739
1794
|
except Exception as e:
|
|
@@ -456,24 +456,26 @@ class CustomComponent(BaseComponent):
|
|
|
456
456
|
"""Returns the variable for the current user with the specified name.
|
|
457
457
|
|
|
458
458
|
Raises:
|
|
459
|
-
ValueError: If the user id is not set.
|
|
459
|
+
ValueError: If the user id is not set and variable not found in context.
|
|
460
460
|
|
|
461
461
|
Returns:
|
|
462
462
|
The variable for the current user with the specified name.
|
|
463
463
|
"""
|
|
464
|
-
if hasattr(self, "_user_id") and not self.user_id:
|
|
465
|
-
msg = f"User id is not set for {self.__class__.__name__}"
|
|
466
|
-
raise ValueError(msg)
|
|
467
|
-
|
|
468
464
|
# Check graph context for request-level variable overrides first
|
|
465
|
+
# This allows run_flow to work without user_id when variables are passed
|
|
469
466
|
if hasattr(self, "graph") and self.graph and hasattr(self.graph, "context"):
|
|
470
467
|
context = self.graph.context
|
|
471
468
|
if context and "request_variables" in context:
|
|
472
469
|
request_variables = context["request_variables"]
|
|
473
470
|
if name in request_variables:
|
|
474
|
-
logger.debug(f"Found context override for variable '{name}'
|
|
471
|
+
logger.debug(f"Found context override for variable '{name}'")
|
|
475
472
|
return request_variables[name]
|
|
476
473
|
|
|
474
|
+
# Only check user_id when we need to access the database
|
|
475
|
+
if hasattr(self, "_user_id") and not self.user_id:
|
|
476
|
+
msg = f"User id is not set for {self.__class__.__name__}"
|
|
477
|
+
raise ValueError(msg)
|
|
478
|
+
|
|
477
479
|
variable_service = get_variable_service() # Get service instance
|
|
478
480
|
# Retrieve and decrypt the variable by name for the current user
|
|
479
481
|
if isinstance(self.user_id, str):
|
|
File without changes
|
|
import functools
from collections.abc import Awaitable, Callable
from typing import Any

from ag_ui.encoder.encoder import EventEncoder

from lfx.log.logger import logger

# Type alias for any async method that can be wrapped by @observable.
AsyncMethod = Callable[..., Awaitable[Any]]

# Module-level encoder shared by all observable callbacks to serialize payloads.
encoder: EventEncoder = EventEncoder()


def observable(observed_method: AsyncMethod) -> AsyncMethod:
    """Decorator to make an async method observable by emitting lifecycle events.

    Decorated classes are expected to implement specific methods to emit AGUI events:
    - `before_callback_event(*args, **kwargs)`: Called before the decorated method executes.
      It should return a dictionary representing the event payload.
    - `after_callback_event(result, *args, **kwargs)`: Called after the decorated method
      successfully completes. It should return a dictionary representing the event payload.
      The `result` of the decorated method is passed as the first argument.
    - `error_callback_event(exception, *args, **kwargs)`: (Optional) Called if the decorated
      method raises an exception. It should return a dictionary representing the error event payload.
      The `exception` is passed as the first argument.

    If these methods are implemented, the decorator will call them to generate event payloads.
    If an implementation is missing, the corresponding event publishing will be skipped without error.

    Payloads returned by these methods can include custom metrics by placing them
    under the 'langflow' key within the 'raw_events' dictionary.

    Example:
        class MyClass:
            display_name = "My Observable Class"

            def before_callback_event(self, *args, **kwargs):
                return {"event_name": "my_method_started", "data": {"input_args": args}}

            async def my_method(self, event_manager: EventManager, data: str):
                # ... method logic ...
                return "processed_data"

            def after_callback_event(self, result, *args, **kwargs):
                return {"event_name": "my_method_completed", "data": {"output": result}}

            def error_callback_event(self, exception, *args, **kwargs):
                return {"event_name": "my_method_failed", "error": str(exception)}

            @observable
            async def my_observable_method(self, event_manager: EventManager, data: str):
                # ... method logic ...
                pass
    """

    async def check_event_manager(self, **kwargs):
        # Gate for event publishing: returns False (and warns) when no usable
        # event_manager was provided.
        # NOTE(review): only the *keyword* argument "event_manager" is checked;
        # a manager passed positionally is treated as absent — confirm the call
        # convention of decorated methods.
        if "event_manager" not in kwargs or kwargs["event_manager"] is None:
            await logger.awarning(
                f"EventManager not available/provided, skipping observable event publishing "
                f"from {self.__class__.__name__}"
            )
            return False
        return True

    async def before_callback(self, *args, **kwargs):
        # Emit the "before" lifecycle event, if the class opted in.
        if not await check_event_manager(self, **kwargs):
            return

        if hasattr(self, "before_callback_event"):
            event_payload = self.before_callback_event(*args, **kwargs)
            # Encoded payload is not yet published anywhere (see TODO below).
            event_payload = encoder.encode(event_payload)
            # TODO: Publish event per request, would require context-based queues
        else:
            await logger.awarning(
                f"before_callback_event not implemented for {self.__class__.__name__}. Skipping event publishing."
            )

    async def after_callback(self, res: Any | None = None, *args, **kwargs):
        # Emit the "after" lifecycle event with the method result, if the class opted in.
        if not await check_event_manager(self, **kwargs):
            return
        if hasattr(self, "after_callback_event"):
            event_payload = self.after_callback_event(res, *args, **kwargs)
            event_payload = encoder.encode(event_payload)
            # TODO: Publish event per request, would require context-based queues
        else:
            await logger.awarning(
                f"after_callback_event not implemented for {self.__class__.__name__}. Skipping event publishing."
            )

    @functools.wraps(observed_method)
    async def wrapper(self, *args, **kwargs):
        # Lifecycle: before -> method -> after; on failure, best-effort error
        # event then re-raise the original exception.
        await before_callback(self, *args, **kwargs)
        result = None
        try:
            result = await observed_method(self, *args, **kwargs)
            await after_callback(self, result, *args, **kwargs)
        except Exception as e:
            await logger.aerror(f"Exception in {self.__class__.__name__}: {e}")
            if hasattr(self, "error_callback_event"):
                try:
                    event_payload = self.error_callback_event(e, *args, **kwargs)
                    event_payload = encoder.encode(event_payload)
                    # TODO: Publish event per request, would require context-based queues
                except Exception as callback_e:  # noqa: BLE001
                    # A failing error callback must never mask the original exception.
                    await logger.aerror(
                        f"Exception during error_callback_event for {self.__class__.__name__}: {callback_e}"
                    )
            raise
        return result

    return wrapper
lfx/field_typing/__init__.py
CHANGED
|
@@ -1,63 +1,6 @@
|
|
|
1
1
|
from typing import Any
|
|
2
2
|
|
|
3
|
-
|
|
4
|
-
AgentExecutor,
|
|
5
|
-
BaseChatMemory,
|
|
6
|
-
BaseChatModel,
|
|
7
|
-
BaseDocumentCompressor,
|
|
8
|
-
BaseLanguageModel,
|
|
9
|
-
BaseLLM,
|
|
10
|
-
BaseLoader,
|
|
11
|
-
BaseMemory,
|
|
12
|
-
BaseOutputParser,
|
|
13
|
-
BasePromptTemplate,
|
|
14
|
-
BaseRetriever,
|
|
15
|
-
Callable,
|
|
16
|
-
Chain,
|
|
17
|
-
ChatPromptTemplate,
|
|
18
|
-
Code,
|
|
19
|
-
Data,
|
|
20
|
-
Document,
|
|
21
|
-
Embeddings,
|
|
22
|
-
LanguageModel,
|
|
23
|
-
NestedDict,
|
|
24
|
-
Object,
|
|
25
|
-
PromptTemplate,
|
|
26
|
-
Retriever,
|
|
27
|
-
Text,
|
|
28
|
-
TextSplitter,
|
|
29
|
-
Tool,
|
|
30
|
-
VectorStore,
|
|
31
|
-
)
|
|
32
|
-
from .range_spec import RangeSpec
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
def _import_input_class():
|
|
36
|
-
from lfx.template.field.base import Input
|
|
37
|
-
|
|
38
|
-
return Input
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
def _import_output_class():
|
|
42
|
-
from lfx.template.field.base import Output
|
|
43
|
-
|
|
44
|
-
return Output
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
def __getattr__(name: str) -> Any:
|
|
48
|
-
# This is to avoid circular imports
|
|
49
|
-
if name == "Input":
|
|
50
|
-
return _import_input_class()
|
|
51
|
-
if name == "Output":
|
|
52
|
-
return _import_output_class()
|
|
53
|
-
if name == "RangeSpec":
|
|
54
|
-
return RangeSpec
|
|
55
|
-
# The other names should work as if they were imported from constants
|
|
56
|
-
# Import the constants module langflow.field_typing.constants
|
|
57
|
-
from . import constants
|
|
58
|
-
|
|
59
|
-
return getattr(constants, name)
|
|
60
|
-
|
|
3
|
+
# Lazy imports - nothing imported at module level except __all__
|
|
61
4
|
|
|
62
5
|
__all__ = [
|
|
63
6
|
"AgentExecutor",
|
|
@@ -78,9 +21,11 @@ __all__ = [
|
|
|
78
21
|
"Data",
|
|
79
22
|
"Document",
|
|
80
23
|
"Embeddings",
|
|
24
|
+
"Input",
|
|
81
25
|
"LanguageModel",
|
|
82
26
|
"NestedDict",
|
|
83
27
|
"Object",
|
|
28
|
+
"Output",
|
|
84
29
|
"PromptTemplate",
|
|
85
30
|
"RangeSpec",
|
|
86
31
|
"Retriever",
|
|
@@ -89,3 +34,57 @@ __all__ = [
|
|
|
89
34
|
"Tool",
|
|
90
35
|
"VectorStore",
|
|
91
36
|
]
|
|
37
|
+
|
|
38
|
+
# Names that come from constants module
|
|
39
|
+
# Names that resolve to attributes of the sibling ``constants`` module.
_CONSTANTS_NAMES = {
    "AgentExecutor", "BaseChatMemory", "BaseChatModel", "BaseDocumentCompressor",
    "BaseLLM", "BaseLanguageModel", "BaseLoader", "BaseMemory", "BaseOutputParser",
    "BasePromptTemplate", "BaseRetriever", "Callable", "Chain", "ChatPromptTemplate",
    "Code", "Data", "Document", "Embeddings", "LanguageModel", "NestedDict",
    "Object", "PromptTemplate", "Retriever", "Text", "TextSplitter", "Tool",
    "VectorStore",
}


def __getattr__(name: str) -> Any:
    """Resolve field-typing names lazily on first attribute access (PEP 562).

    Importing inside each branch keeps module import cheap and avoids circular
    imports: ``Input``/``Output`` come from the template package, ``RangeSpec``
    from its own module, and everything else from the ``constants`` module.
    """
    if name in ("Input", "Output"):
        from lfx.template.field import base

        return getattr(base, name)
    if name == "RangeSpec":
        from .range_spec import RangeSpec

        return RangeSpec
    if name in _CONSTANTS_NAMES:
        from . import constants

        return getattr(constants, name)
    msg = f"module {__name__!r} has no attribute {name!r}"
    raise AttributeError(msg)