lfx-nightly 0.1.12.dev34__py3-none-any.whl → 0.1.12.dev36__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: the registry flags this version of lfx-nightly as possibly problematic.
- lfx/base/composio/composio_base.py +65 -26
- lfx/base/data/base_file.py +3 -2
- lfx/base/tools/flow_tool.py +1 -1
- lfx/base/tools/run_flow.py +1 -1
- lfx/cli/commands.py +17 -12
- lfx/cli/run.py +156 -95
- lfx/components/__init__.py +3 -0
- lfx/components/deactivated/sub_flow.py +1 -1
- lfx/components/logic/flow_tool.py +1 -1
- lfx/components/logic/run_flow.py +1 -1
- lfx/components/logic/sub_flow.py +1 -1
- lfx/components/vectorstores/local_db.py +0 -1
- lfx/components/vlmrun/__init__.py +34 -0
- lfx/components/vlmrun/vlmrun_transcription.py +224 -0
- lfx/custom/custom_component/custom_component.py +1 -1
- lfx/graph/vertex/param_handler.py +2 -2
- lfx/helpers/__init__.py +129 -1
- lfx/helpers/flow.py +0 -3
- lfx/inputs/input_mixin.py +2 -1
- lfx/inputs/inputs.py +5 -14
- lfx/log/logger.py +5 -1
- lfx/memory/__init__.py +10 -30
- lfx/schema/cross_module.py +80 -0
- lfx/schema/data.py +2 -1
- lfx/services/mcp_composer/service.py +3 -2
- lfx/services/settings/base.py +31 -0
- lfx/utils/langflow_utils.py +52 -0
- {lfx_nightly-0.1.12.dev34.dist-info → lfx_nightly-0.1.12.dev36.dist-info}/METADATA +1 -1
- {lfx_nightly-0.1.12.dev34.dist-info → lfx_nightly-0.1.12.dev36.dist-info}/RECORD +31 -27
- {lfx_nightly-0.1.12.dev34.dist-info → lfx_nightly-0.1.12.dev36.dist-info}/WHEEL +0 -0
- {lfx_nightly-0.1.12.dev34.dist-info → lfx_nightly-0.1.12.dev36.dist-info}/entry_points.txt +0 -0
lfx/components/vlmrun/vlmrun_transcription.py
ADDED

@@ -0,0 +1,224 @@
+from pathlib import Path
+from urllib.parse import urlparse
+
+from langflow.custom.custom_component.component import Component
+from langflow.io import (
+    DropdownInput,
+    FileInput,
+    IntInput,
+    MessageTextInput,
+    Output,
+    SecretStrInput,
+)
+from langflow.schema.data import Data
+from loguru import logger
+
+
+class VLMRunTranscription(Component):
+    display_name = "VLM Run Transcription"
+    description = "Extract structured data from audio and video using [VLM Run AI](https://app.vlm.run)"
+    documentation = "https://docs.vlm.run"
+    icon = "VLMRun"
+    beta = True
+
+    inputs = [
+        SecretStrInput(
+            name="api_key",
+            display_name="VLM Run API Key",
+            info="Get your API key from https://app.vlm.run",
+            required=True,
+        ),
+        DropdownInput(
+            name="media_type",
+            display_name="Media Type",
+            options=["audio", "video"],
+            value="audio",
+            info="Select the type of media to process",
+        ),
+        FileInput(
+            name="media_files",
+            display_name="Media Files",
+            file_types=[
+                "mp3",
+                "wav",
+                "m4a",
+                "flac",
+                "ogg",
+                "opus",
+                "webm",
+                "aac",
+                "mp4",
+                "mov",
+                "avi",
+                "mkv",
+                "flv",
+                "wmv",
+                "m4v",
+            ],
+            info="Upload one or more audio/video files",
+            required=False,
+            is_list=True,
+        ),
+        MessageTextInput(
+            name="media_url",
+            display_name="Media URL",
+            info="URL to media file (alternative to file upload)",
+            required=False,
+            advanced=True,
+        ),
+        IntInput(
+            name="timeout_seconds",
+            display_name="Timeout (seconds)",
+            value=600,
+            info="Maximum time to wait for processing completion",
+            advanced=True,
+        ),
+        DropdownInput(
+            name="domain",
+            display_name="Processing Domain",
+            options=["transcription"],
+            value="transcription",
+            info="Select the processing domain",
+            advanced=True,
+        ),
+    ]
+
+    outputs = [
+        Output(
+            display_name="Result",
+            name="result",
+            method="process_media",
+        ),
+    ]
+
+    def _check_inputs(self) -> str | None:
+        """Validate that either media files or URL is provided."""
+        if not self.media_files and not self.media_url:
+            return "Either media files or media URL must be provided"
+        return None
+
+    def _import_vlmrun(self):
+        """Import and return VLMRun client class."""
+        try:
+            from vlmrun.client import VLMRun
+        except ImportError as e:
+            error_msg = "VLM Run SDK not installed. Run: pip install 'vlmrun[all]'"
+            raise ImportError(error_msg) from e
+        else:
+            return VLMRun
+
+    def _generate_media_response(self, client, media_source):
+        """Generate response for audio or video media."""
+        domain_str = f"{self.media_type}.{self.domain}"
+
+        if self.media_type == "audio":
+            if isinstance(media_source, Path):
+                return client.audio.generate(file=media_source, domain=domain_str, batch=True)
+            return client.audio.generate(url=media_source, domain=domain_str, batch=True)
+        # video
+        if isinstance(media_source, Path):
+            return client.video.generate(file=media_source, domain=domain_str, batch=True)
+        return client.video.generate(url=media_source, domain=domain_str, batch=True)
+
+    def _wait_for_response(self, client, response):
+        """Wait for batch processing to complete if needed."""
+        if hasattr(response, "id"):
+            return client.predictions.wait(response.id, timeout=self.timeout_seconds)
+        return response
+
+    def _extract_transcription(self, segments: list) -> list[str]:
+        """Extract transcription parts from segments."""
+        transcription_parts = []
+        for segment in segments:
+            if self.media_type == "audio" and "audio" in segment:
+                transcription_parts.append(segment["audio"].get("content", ""))
+            elif self.media_type == "video" and "video" in segment:
+                transcription_parts.append(segment["video"].get("content", ""))
+                # Also include audio if available for video
+                if "audio" in segment:
+                    audio_content = segment["audio"].get("content", "")
+                    if audio_content and audio_content.strip():
+                        transcription_parts.append(f"[Audio: {audio_content}]")
+        return transcription_parts
+
+    def _create_result_dict(self, response, transcription_parts: list, source_name: str) -> dict:
+        """Create a standardized result dictionary."""
+        response_data = response.response if hasattr(response, "response") else {}
+        result = {
+            "prediction_id": response.id if hasattr(response, "id") else None,
+            "transcription": " ".join(transcription_parts),
+            "full_response": response_data,
+            "metadata": {
+                "media_type": self.media_type,
+                "duration": response_data.get("metadata", {}).get("duration", 0),
+            },
+            "usage": response.usage if hasattr(response, "usage") else None,
+            "status": response.status if hasattr(response, "status") else "completed",
+        }
+
+        # Add source-specific field
+        parsed_url = urlparse(source_name)
+        if parsed_url.scheme in ["http", "https", "s3", "gs", "ftp", "ftps"]:
+            result["source"] = source_name
+        else:
+            result["filename"] = source_name
+
+        return result
+
+    def _process_single_media(self, client, media_source, source_name: str) -> dict:
+        """Process a single media file or URL."""
+        response = self._generate_media_response(client, media_source)
+        response = self._wait_for_response(client, response)
+        response_data = response.response if hasattr(response, "response") else {}
+        segments = response_data.get("segments", [])
+        transcription_parts = self._extract_transcription(segments)
+        return self._create_result_dict(response, transcription_parts, source_name)
+
+    def process_media(self) -> Data:
+        """Process audio or video file and extract structured data."""
+        # Validate inputs
+        error_msg = self._check_inputs()
+        if error_msg:
+            self.status = error_msg
+            return Data(data={"error": error_msg})
+
+        try:
+            # Import and initialize client
+            vlmrun_class = self._import_vlmrun()
+            client = vlmrun_class(api_key=self.api_key)
+            all_results = []
+
+            # Handle multiple files
+            if self.media_files:
+                files_to_process = self.media_files if isinstance(self.media_files, list) else [self.media_files]
+                for idx, media_file in enumerate(files_to_process):
+                    self.status = f"Processing file {idx + 1} of {len(files_to_process)}..."
+                    result = self._process_single_media(client, Path(media_file), Path(media_file).name)
+                    all_results.append(result)
+
+            # Handle URL
+            elif self.media_url:
+                result = self._process_single_media(client, self.media_url, self.media_url)
+                all_results.append(result)
+
+            # Return clean, flexible output structure
+            output_data = {
+                "results": all_results,
+                "total_files": len(all_results),
+            }
+            self.status = f"Successfully processed {len(all_results)} file(s)"
+            return Data(data=output_data)
+
+        except ImportError as e:
+            self.status = str(e)
+            return Data(data={"error": str(e)})
+        except (ValueError, ConnectionError, TimeoutError) as e:
+            logger.opt(exception=True).debug("Error processing media with VLM Run")
+            error_msg = f"Processing failed: {e!s}"
+            self.status = error_msg
+            return Data(data={"error": error_msg})
+        except (AttributeError, KeyError, OSError) as e:
+            logger.opt(exception=True).debug("Unexpected error processing media with VLM Run")
+            error_msg = f"Unexpected error: {e!s}"
+            self.status = error_msg
+            return Data(data={"error": error_msg})
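For orientation, the sketch below mirrors the call sequence process_media performs for a single audio file, using only the SDK surface visible in the diff (VLMRun, audio.generate, predictions.wait). The API key and file path are placeholders; treat this as an illustration of the component's flow, not official vlmrun documentation.

from pathlib import Path

from vlmrun.client import VLMRun  # requires: pip install 'vlmrun[all]'

client = VLMRun(api_key="YOUR_API_KEY")  # placeholder credential

# Submit a batch transcription job, then poll for completion, mirroring
# _generate_media_response and _wait_for_response above.
response = client.audio.generate(
    file=Path("meeting.mp3"),  # placeholder file
    domain="audio.transcription",
    batch=True,
)
if hasattr(response, "id"):
    response = client.predictions.wait(response.id, timeout=600)

# Join per-segment transcript text, as _extract_transcription does.
response_data = response.response if hasattr(response, "response") else {}
segments = response_data.get("segments", [])
text = " ".join(s.get("audio", {}).get("content", "") for s in segments)
print(text)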
lfx/custom/custom_component/custom_component.py
CHANGED

@@ -12,7 +12,7 @@ from pydantic import BaseModel
 
 from lfx.custom import validate
 from lfx.custom.custom_component.base_component import BaseComponent
-from lfx.helpers
+from lfx.helpers import list_flows, load_flow, run_flow
 from lfx.log.logger import logger
 from lfx.schema.data import Data
 from lfx.services.deps import get_storage_service, get_variable_service, session_scope
lfx/graph/vertex/param_handler.py
CHANGED

@@ -161,7 +161,7 @@ class ParameterHandler:
         elif field.get("required"):
             field_display_name = field.get("display_name")
             logger.warning(
-                "File path not found for
+                "File path not found for %s in component %s. Setting to None.",
                 field_display_name,
                 self.vertex.display_name,
             )

@@ -255,7 +255,7 @@ class ParameterHandler:
             else:
                 params[field_name] = ast.literal_eval(val) if val else None
         except Exception:  # noqa: BLE001
-            logger.debug("Error evaluating code for
+            logger.debug("Error evaluating code for %s", field_name)
            params[field_name] = val
        return params
 
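The replacement lines pass printf-style lazy arguments (%s) instead of interpolating values into the message up front, so formatting cost is deferred until a handler actually emits the record. A minimal illustration with the stdlib logging module (the lfx logger is structlog-based, but the lazy-argument convention is the same):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class Expensive:
    def __repr__(self) -> str:
        print("repr computed!")
        return "<expensive>"

value = Expensive()

# Eager: repr(value) runs even though DEBUG records are discarded.
logger.debug(f"value is {value!r}")

# Lazy: the %r placeholder is never formatted for a discarded record.
logger.debug("value is %r", value)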
lfx/helpers/__init__.py
CHANGED

@@ -1 +1,129 @@
-"""Helpers module for lfx package.
+"""Helpers module for the lfx package.
+
+This module automatically chooses between the full langflow implementation
+(when available) and the lfx implementation (when standalone).
+"""
+
+from lfx.utils.langflow_utils import has_langflow_memory
+
+# Import the appropriate implementation
+if has_langflow_memory():
+    try:
+        # Import full langflow implementation
+        # Base Model
+        from langflow.helpers.base_model import (
+            BaseModel,
+            SchemaField,
+            build_model_from_schema,
+            coalesce_bool,
+        )
+
+        # Custom
+        from langflow.helpers.custom import (
+            format_type,
+        )
+
+        # Data
+        from langflow.helpers.data import (
+            clean_string,
+            data_to_text,
+            data_to_text_list,
+            docs_to_data,
+            safe_convert,
+        )
+
+        # Flow
+        from langflow.helpers.flow import (
+            build_schema_from_inputs,
+            get_arg_names,
+            get_flow_inputs,
+            list_flows,
+            load_flow,
+            run_flow,
+        )
+    except ImportError:
+        # Fallback to lfx implementation if langflow import fails
+        # Base Model
+        from lfx.helpers.base_model import (
+            BaseModel,
+            SchemaField,
+            build_model_from_schema,
+            coalesce_bool,
+        )
+
+        # Custom
+        from lfx.helpers.custom import (
+            format_type,
+        )
+
+        # Data
+        from lfx.helpers.data import (
+            clean_string,
+            data_to_text,
+            data_to_text_list,
+            docs_to_data,
+            safe_convert,
+        )
+
+        # Flow
+        from lfx.helpers.flow import (
+            build_schema_from_inputs,
+            get_arg_names,
+            get_flow_inputs,
+            list_flows,
+            load_flow,
+            run_flow,
+        )
+else:
+    # Use lfx implementation
+    # Base Model
+    from lfx.helpers.base_model import (
+        BaseModel,
+        SchemaField,
+        build_model_from_schema,
+        coalesce_bool,
+    )
+
+    # Custom
+    from lfx.helpers.custom import (
+        format_type,
+    )
+
+    # Data
+    from lfx.helpers.data import (
+        clean_string,
+        data_to_text,
+        data_to_text_list,
+        docs_to_data,
+        safe_convert,
+    )
+
+    # Flow
+    from lfx.helpers.flow import (
+        build_schema_from_inputs,
+        get_arg_names,
+        get_flow_inputs,
+        list_flows,
+        load_flow,
+        run_flow,
+    )
+
+# Export the available functions
+__all__ = [
+    "BaseModel",
+    "SchemaField",
+    "build_model_from_schema",
+    "build_schema_from_inputs",
+    "clean_string",
+    "coalesce_bool",
+    "data_to_text",
+    "data_to_text_list",
+    "docs_to_data",
+    "format_type",
+    "get_arg_names",
+    "get_flow_inputs",
+    "list_flows",
+    "load_flow",
+    "run_flow",
+    "safe_convert",
+]
lfx/helpers/flow.py
CHANGED
lfx/inputs/input_mixin.py
CHANGED

@@ -12,6 +12,7 @@ from pydantic import (
 
 from lfx.field_typing.range_spec import RangeSpec
 from lfx.inputs.validators import CoalesceBool
+from lfx.schema.cross_module import CrossModuleModel
 
 
 class FieldTypes(str, Enum):

@@ -42,7 +43,7 @@ SerializableFieldTypes = Annotated[FieldTypes, PlainSerializer(lambda v: v.value
 
 
 # Base mixin for common input field attributes and methods
-class BaseInputMixin(
+class BaseInputMixin(CrossModuleModel, validate_assignment=True):  # type: ignore[call-arg]
     model_config = ConfigDict(
         arbitrary_types_allowed=True,
         extra="forbid",
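Note that validate_assignment=True is passed as a Pydantic v2 class keyword, which merges into model_config alongside the ConfigDict shown above; attribute writes are then re-validated, not just constructor arguments. A standalone sketch of that behavior with an unrelated toy model:

from pydantic import BaseModel, ValidationError

class Point(BaseModel, validate_assignment=True):
    x: int

p = Point(x=1)
p.x = "2"  # re-validated on assignment and coerced to int 2
try:
    p.x = "not a number"
except ValidationError:
    print("assignment rejected")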
lfx/inputs/inputs.py
CHANGED

@@ -190,24 +190,15 @@ class MessageInput(StrInput, InputTraceMixin):
         # If v is a instance of Message, then its fine
         if isinstance(v, dict):
             return Message(**v)
+        # Duck-typed Message check - works across module boundaries
         if isinstance(v, Message):
+            # If it's from a different module (e.g., langflow.schema.Message),
+            # convert it to ensure we have the right type
+            if type(v).__module__ != Message.__module__:
+                return Message(**v.model_dump())
             return v
-        # Check for Message-like objects by examining their fields
-        # This handles both langflow and lfx Message instances
-        if hasattr(v, "text") and hasattr(v, "model_dump") and callable(v.model_dump):
-            # Check if it has other Message-specific attributes
-            message_fields = {"text", "data", "sender", "session_id", "properties"}
-            obj_attrs = set(dir(v))
-            min_message_fields = 3
-            if len(message_fields.intersection(obj_attrs)) >= min_message_fields:
-                try:
-                    return Message(**v.model_dump())
-                except (TypeError, ValueError):
-                    # Fallback to text only if model_dump fails
-                    return Message(text=v.text)
         if isinstance(v, str | AsyncIterator | Iterator):
             return Message(text=v)
-        # For simplified implementation, we'll skip MessageBase handling
         msg = f"Invalid value type {type(v)}"
         raise ValueError(msg)
 
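The replacement logic is much simpler than the removed duck-typing block: any object that passes the isinstance check but whose class object lives in a different module is rebuilt through model_dump(), so the validator always returns the local Message type. A toy reproduction of that rebuild step (both classes here are stand-ins for the lfx and langflow variants):

from pydantic import BaseModel

class LfxMessage(BaseModel):  # stand-in for lfx.schema.message.Message
    text: str = ""

class LangflowMessage(BaseModel):  # stand-in for langflow.schema.Message
    text: str = ""

incoming = LangflowMessage(text="hello")

# The validator's conversion step: round-trip through model_dump() so the
# result is unambiguously the local class.
local = LfxMessage(**incoming.model_dump())
assert type(local) is LfxMessage and local.text == "hello"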
lfx/log/logger.py
CHANGED

@@ -209,6 +209,7 @@ def configure(
     log_format: str | None = None,
     log_rotation: str | None = None,
     cache: bool | None = None,
+    output_file=None,
 ) -> None:
     """Configure the logger."""
     # Early-exit only if structlog is configured AND current min level matches the requested one.

@@ -297,11 +298,14 @@ def configure(
     wrapper_class.min_level = numeric_level
 
     # Configure structlog
+    # Default to stdout for backward compatibility, unless output_file is specified
+    log_output_file = output_file if output_file is not None else sys.stdout
+
     structlog.configure(
         processors=processors,
         wrapper_class=wrapper_class,
         context_class=dict,
-        logger_factory=structlog.PrintLoggerFactory(file=
+        logger_factory=structlog.PrintLoggerFactory(file=log_output_file)
         if not log_file
         else structlog.stdlib.LoggerFactory(),
         cache_logger_on_first_use=cache if cache is not None else True,
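The new output_file parameter makes the print destination injectable, which is handy for capturing logs in tests. A hedged usage sketch (it assumes no log_file is configured, since that branch switches to the stdlib logger factory):

from io import StringIO

from lfx.log.logger import configure, logger

buffer = StringIO()
configure(output_file=buffer)  # structlog prints here instead of sys.stdout
logger.info("hello")
print(buffer.getvalue())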
lfx/memory/__init__.py
CHANGED

@@ -1,35 +1,15 @@
 """Memory management for lfx with dynamic loading.
 
-This module automatically chooses between full langflow
-(when available) and lfx
+This module automatically chooses between the full langflow implementation
+(when available) and the lfx implementation (when standalone).
 """
 
-import
+from lfx.utils.langflow_utils import has_langflow_memory
 
-
-
-
-def _has_langflow_memory():
-    """Check if langflow.memory with database support is available."""
-    try:
-        # Check if langflow.memory and MessageTable are available
-        return importlib.util.find_spec("langflow") is not None
-    except (ImportError, ModuleNotFoundError):
-        pass
-    except Exception as e:  # noqa: BLE001
-        logger.error(f"Error checking for langflow.memory: {e}")
-    return False
-
-
-#### TODO: This _LANGFLOW_AVAILABLE implementation should be changed later ####
-# Consider refactoring to lazy loading or a more robust service discovery mechanism
-# that can handle runtime availability changes.
-_LANGFLOW_AVAILABLE = _has_langflow_memory()
-
-# Import the appropriate implementations
-if _LANGFLOW_AVAILABLE:
+# Import the appropriate implementation
+if has_langflow_memory():
     try:
-        # Import
+        # Import full langflow implementation
         from langflow.memory import (
             aadd_messages,
             aadd_messagetables,

@@ -43,8 +23,8 @@ if _LANGFLOW_AVAILABLE:
             get_messages,
             store_message,
         )
-    except
-        #
+    except ImportError:
+        # Fallback to lfx implementation if langflow import fails
         from lfx.memory.stubs import (
             aadd_messages,
             aadd_messagetables,

@@ -59,7 +39,7 @@ if _LANGFLOW_AVAILABLE:
             store_message,
         )
 else:
-    # Use lfx
+    # Use lfx implementation
     from lfx.memory.stubs import (
         aadd_messages,
         aadd_messagetables,

@@ -74,7 +54,7 @@ else:
         store_message,
     )
 
-# Export the available functions
+# Export the available functions
 __all__ = [
     "aadd_messages",
     "aadd_messagetables",
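lfx/utils/langflow_utils.py (+52 lines) is not expanded in this diff view, but judging from the _has_langflow_memory helper deleted above, the shared has_langflow_memory() plausibly centralizes the same availability probe. A speculative sketch of that check, not the actual file contents:

import importlib.util

def has_langflow_memory() -> bool:
    """Return True when the full langflow package is importable."""
    try:
        return importlib.util.find_spec("langflow") is not None
    except (ImportError, ModuleNotFoundError):
        return False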
lfx/schema/cross_module.py
ADDED

@@ -0,0 +1,80 @@
+"""Cross-module BaseModel for handling re-exported classes.
+
+This module provides a metaclass and base model that enable isinstance checks
+to work across module boundaries for Pydantic models. This is particularly useful
+when the same class is re-exported from different modules (e.g., lfx.Message vs
+langflow.schema.Message) but Python's isinstance() checks fail due to different
+module paths.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+from pydantic import BaseModel
+
+
+class CrossModuleMeta(type(BaseModel)):  # type: ignore[misc]
+    """Metaclass that enables cross-module isinstance checks for Pydantic models.
+
+    This metaclass overrides __instancecheck__ to perform structural type checking
+    based on the model's fields rather than strict class identity. This allows
+    instances of the same model from different module paths to be recognized as
+    compatible.
+    """
+
+    def __instancecheck__(cls, instance: Any) -> bool:
+        """Check if instance is compatible with this class across module boundaries.
+
+        First performs a standard isinstance check. If that fails, falls back to
+        checking if the instance has all required Pydantic model attributes and
+        a compatible set of model fields.
+
+        Args:
+            instance: The object to check.
+
+        Returns:
+            bool: True if instance is compatible with this class.
+        """
+        # First try standard isinstance check
+        if type.__instancecheck__(cls, instance):
+            return True
+
+        # If that fails, check for cross-module compatibility
+        # An object is cross-module compatible if it:
+        # 1. Has model_fields attribute (is a Pydantic model)
+        # 2. Has the same __class__.__name__
+        # 3. Has compatible model fields
+        if not hasattr(instance, "model_fields"):
+            return False
+
+        # Check if class names match
+        if instance.__class__.__name__ != cls.__name__:
+            return False
+
+        # Check if the instance has all required fields from cls
+        cls_fields = set(cls.model_fields.keys()) if hasattr(cls, "model_fields") else set()
+        instance_fields = set(instance.model_fields.keys())
+
+        # The instance must have at least the same fields as the class
+        # (it can have more, but not fewer required fields)
+        return cls_fields.issubset(instance_fields)
+
+
+class CrossModuleModel(BaseModel, metaclass=CrossModuleMeta):
+    """Base Pydantic model with cross-module isinstance support.
+
+    This class should be used as the base for models that may be re-exported
+    from different modules. It enables isinstance() checks to work across
+    module boundaries by using structural type checking.
+
+    Example:
+        >>> class Message(CrossModuleModel):
+        ...     text: str
+        ...
+        >>> # Even if Message is imported from different paths:
+        >>> from lfx.schema.message import Message as LfxMessage
+        >>> from langflow.schema import Message as LangflowMessage
+        >>> msg = LfxMessage(text="hello")
+        >>> isinstance(msg, LangflowMessage)  # True (with cross-module support)
+    """
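The docstring example above requires both packages installed; the self-contained check below exercises the same structural rule by faking a re-exported class (same class name, superset of fields, different class object). Names here are illustrative only:

from pydantic import BaseModel

from lfx.schema.cross_module import CrossModuleModel

class Message(CrossModuleModel):
    text: str

class _Foreign(BaseModel):  # pretend this came from another package
    text: str
    flavor: str = "other"

_Foreign.__name__ = "Message"  # same name, different module path

foreign = _Foreign(text="hi")
print(isinstance(foreign, Message))  # True via CrossModuleMeta's field check
print(type.__instancecheck__(Message, foreign))  # False: not a real subclass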
lfx/schema/data.py
CHANGED

@@ -14,6 +14,7 @@ from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
 from pydantic import BaseModel, ConfigDict, model_serializer, model_validator
 
 from lfx.log.logger import logger
+from lfx.schema.cross_module import CrossModuleModel
 from lfx.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_USER
 from lfx.utils.image import create_image_content_dict
 

@@ -22,7 +23,7 @@ if TYPE_CHECKING:
     from lfx.schema.message import Message
 
 
-class Data(
+class Data(CrossModuleModel):
     """Represents a record with text and optional data.
 
     Attributes:
lfx/services/mcp_composer/service.py
CHANGED

@@ -417,9 +417,10 @@ class MCPComposerService(Service):
         startup_delay: float = 2.0,
     ) -> subprocess.Popen:
         """Start the MCP Composer subprocess for a specific project."""
+        settings = get_settings_service().settings
         cmd = [
             "uvx",
-            "mcp-composer",
+            f"mcp-composer{settings.mcp_composer_version}",
             "--mode",
             "sse",
             "--sse-url",

@@ -447,7 +448,7 @@ class MCPComposerService(Service):
             "oauth_server_url": "OAUTH_SERVER_URL",
             "oauth_callback_path": "OAUTH_CALLBACK_PATH",
             "oauth_client_id": "OAUTH_CLIENT_ID",
-            "oauth_client_secret": "OAUTH_CLIENT_SECRET",
+            "oauth_client_secret": "OAUTH_CLIENT_SECRET",  # pragma: allowlist secret
             "oauth_auth_url": "OAUTH_AUTH_URL",
             "oauth_token_url": "OAUTH_TOKEN_URL",
             "oauth_mcp_scope": "OAUTH_MCP_SCOPE",