lfx-nightly 0.2.1.dev7__py3-none-any.whl → 0.3.0.dev3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lfx/__main__.py +137 -6
- lfx/_assets/component_index.json +1 -1
- lfx/base/agents/agent.py +9 -5
- lfx/base/agents/altk_base_agent.py +5 -3
- lfx/base/agents/events.py +1 -1
- lfx/base/models/unified_models.py +1 -1
- lfx/base/models/watsonx_constants.py +10 -7
- lfx/base/prompts/api_utils.py +40 -5
- lfx/cli/__init__.py +10 -2
- lfx/cli/script_loader.py +5 -4
- lfx/cli/validation.py +6 -3
- lfx/components/datastax/astradb_assistant_manager.py +4 -2
- lfx/components/docling/docling_remote.py +1 -0
- lfx/components/langchain_utilities/ibm_granite_handler.py +211 -0
- lfx/components/langchain_utilities/tool_calling.py +24 -1
- lfx/components/llm_operations/lambda_filter.py +182 -97
- lfx/components/models_and_agents/mcp_component.py +38 -1
- lfx/components/models_and_agents/prompt.py +105 -18
- lfx/components/ollama/ollama_embeddings.py +109 -28
- lfx/components/processing/text_operations.py +580 -0
- lfx/custom/custom_component/component.py +65 -10
- lfx/events/observability/__init__.py +0 -0
- lfx/events/observability/lifecycle_events.py +111 -0
- lfx/field_typing/__init__.py +57 -58
- lfx/graph/graph/base.py +36 -0
- lfx/graph/utils.py +45 -12
- lfx/graph/vertex/base.py +71 -22
- lfx/graph/vertex/vertex_types.py +0 -5
- lfx/inputs/input_mixin.py +1 -0
- lfx/inputs/inputs.py +5 -0
- lfx/interface/components.py +24 -7
- lfx/run/base.py +47 -77
- lfx/schema/__init__.py +50 -0
- lfx/schema/message.py +85 -8
- lfx/schema/workflow.py +171 -0
- lfx/services/deps.py +12 -0
- lfx/services/interfaces.py +43 -1
- lfx/services/schema.py +1 -0
- lfx/services/settings/auth.py +95 -4
- lfx/services/settings/base.py +4 -0
- lfx/services/settings/utils.py +82 -0
- lfx/services/transaction/__init__.py +5 -0
- lfx/services/transaction/service.py +35 -0
- lfx/tests/unit/components/__init__.py +0 -0
- lfx/utils/constants.py +1 -0
- lfx/utils/mustache_security.py +79 -0
- lfx/utils/validate_cloud.py +67 -0
- {lfx_nightly-0.2.1.dev7.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/METADATA +3 -1
- {lfx_nightly-0.2.1.dev7.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/RECORD +51 -42
- {lfx_nightly-0.2.1.dev7.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/WHEEL +0 -0
- {lfx_nightly-0.2.1.dev7.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/entry_points.txt +0 -0
lfx/schema/message.py
CHANGED
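The hunks below thread a template_format parameter through format_text() and the from_template* constructors, route "mustache" templates through the new safe_mustache_render helper instead of langchain's PromptTemplate, and add get_id()/has_id()/require_id() for safe access to the message ID. A rough usage sketch of the new surface; only Message and its methods come from this diff, and the assumption that from_template returns the rendered Message is noted inline:

from lfx.schema.message import Message

# f-string templates keep the previous default behaviour.
plain = Message.from_template("Hello {name}", name="Ada")

# Mustache templates are rendered through safe_mustache_render rather than PromptTemplate.
mustache = Message.from_template("Hello {{name}}", template_format="mustache", name="Ada")
print(mustache.text)  # "Hello Ada" (assuming from_template returns the rendered Message)

# A Message only carries an ID once it has been stored in the database.
if mustache.has_id():
    message_id = mustache.require_id()  # safe here: has_id() was checked first
else:
    assert mustache.get_id() is None  # get_id() never raises, unlike require_id()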
@@ -12,10 +12,11 @@ from uuid import UUID
 from fastapi.encoders import jsonable_encoder
 from langchain_core.load import load
 from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage, ToolMessage
-from langchain_core.prompts.chat import BaseChatPromptTemplate, ChatPromptTemplate
-from langchain_core.prompts.prompt import PromptTemplate
 from pydantic import BaseModel, ConfigDict, Field, ValidationError, field_serializer, field_validator
 
+if TYPE_CHECKING:
+    from langchain_core.prompts.chat import BaseChatPromptTemplate
+
 from lfx.base.prompts.utils import dict_values_to_string
 from lfx.log.logger import logger
 from lfx.schema.content_block import ContentBlock
@@ -26,12 +27,33 @@ from lfx.schema.properties import Properties, Source
 from lfx.schema.validators import timestamp_to_str, timestamp_to_str_validator
 from lfx.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_NAME_USER, MESSAGE_SENDER_USER
 from lfx.utils.image import create_image_content_dict
+from lfx.utils.mustache_security import safe_mustache_render
 
 if TYPE_CHECKING:
     from lfx.schema.dataframe import DataFrame
 
 
 class Message(Data):
+    """Message schema for Langflow.
+
+    Message ID Semantics:
+    - Messages only have an ID after being stored in the database
+    - Messages that are skipped (via Component._should_skip_message) will NOT have an ID
+    - Always use get_id(), has_id(), or require_id() methods to safely access the ID
+    - Never access message.id directly without checking if it exists first
+
+    Safe ID Access Patterns:
+    - Use get_id() when ID may or may not exist (returns None if missing)
+    - Use has_id() to check if ID exists before operations that require it
+    - Use require_id() when ID is required (raises ValueError if missing)
+
+    Example:
+        message_id = message.get_id()  # Safe: returns None if no ID
+        if message.has_id():
+            # Safe to use message_id
+            do_something_with_id(message_id)
+    """
+
     model_config = ConfigDict(arbitrary_types_allowed=True)
     # Helper class to deal with image data
     text_key: str = "text"
@@ -266,24 +288,35 @@ class Message(Data):
         prompt_json = prompt.to_json()
         return cls(prompt=prompt_json)
 
-    def format_text(self):
-        prompt_template = PromptTemplate.from_template(self.template)
+    def format_text(self, template_format="f-string"):
+        if template_format == "mustache":
+            # Use our secure mustache renderer
+            variables_with_str_values = dict_values_to_string(self.variables)
+            formatted_prompt = safe_mustache_render(self.template, variables_with_str_values)
+            self.text = formatted_prompt
+            return formatted_prompt
+        # Use langchain's template for other formats
+        from langchain_core.prompts.prompt import PromptTemplate
+
+        prompt_template = PromptTemplate.from_template(self.template, template_format=template_format)
         variables_with_str_values = dict_values_to_string(self.variables)
         formatted_prompt = prompt_template.format(**variables_with_str_values)
         self.text = formatted_prompt
         return formatted_prompt
 
     @classmethod
-    async def from_template_and_variables(cls, template: str, **variables):
+    async def from_template_and_variables(cls, template: str, template_format: str = "f-string", **variables):
         # This method has to be async for backwards compatibility with versions
         # >1.0.15, <1.1
-        return cls.from_template(template, **variables)
+        return cls.from_template(template, template_format=template_format, **variables)
 
     # Define a sync version for backwards compatibility with versions >1.0.15, <1.1
     @classmethod
-    def from_template(cls, template: str, **variables):
+    def from_template(cls, template: str, template_format: str = "f-string", **variables):
+        from langchain_core.prompts.chat import ChatPromptTemplate
+
         instance = cls(template=template, variables=variables)
-        text = instance.format_text()
+        text = instance.format_text(template_format=template_format)
         message = HumanMessage(content=text)
         contents = []
         for value in variables.values():
@@ -314,6 +347,50 @@ class Message(Data):
 
         return DataFrame(data=[self])
 
+    def get_id(self) -> str | UUID | None:
+        """Safely get the message ID.
+
+        Returns:
+            The message ID if it exists, None otherwise.
+
+        Note:
+            A message only has an ID if it has been stored in the database.
+            Messages that are skipped (via _should_skip_message) will not have an ID.
+        """
+        return getattr(self, "id", None)
+
+    def has_id(self) -> bool:
+        """Check if the message has an ID.
+
+        Returns:
+            True if the message has an ID, False otherwise.
+
+        Note:
+            A message only has an ID if it has been stored in the database.
+            Messages that are skipped (via _should_skip_message) will not have an ID.
+        """
+        message_id = getattr(self, "id", None)
+        return message_id is not None
+
+    def require_id(self) -> str | UUID:
+        """Get the message ID, raising an error if it doesn't exist.
+
+        Returns:
+            The message ID.
+
+        Raises:
+            ValueError: If the message does not have an ID.
+
+        Note:
+            Use this method when an ID is required for the operation.
+            For optional ID access, use get_id() instead.
+        """
+        message_id = getattr(self, "id", None)
+        if message_id is None:
+            msg = "Message does not have an ID. Messages only have IDs after being stored in the database."
+            raise ValueError(msg)
+        return message_id
+
 
 class DefaultModel(BaseModel):
     model_config = ConfigDict(
lfx/schema/workflow.py
ADDED
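The new module below defines the request/response schemas for the V2 workflow execution API. As a rough illustration of the request model (the flow ID is copied from the schema's own json_schema_extra example; everything else is illustrative):

from lfx.schema.workflow import JobStatus, WorkflowExecutionRequest

request = WorkflowExecutionRequest(
    flow_id="flow_67ccd2be17f0819081ff3bb2cf6508e60bb6a6b452d3795b",
    inputs={
        "global": {"input_value": "Hello, how can you help me today?", "input_type": "chat"},
        "llm_component": {"temperature": 0.7, "max_tokens": 100},
    },
)
assert request.background is False and request.stream is False  # defaults
assert JobStatus.IN_PROGRESS.value == "in_progress"
# extra="forbid" means unknown top-level fields raise a ValidationError instead of being ignored.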
@@ -0,0 +1,171 @@
+"""Workflow execution schemas for V2 API."""
+
+from __future__ import annotations
+
+from enum import Enum
+from typing import Any, Literal
+
+from pydantic import BaseModel, ConfigDict, Field
+
+
+class JobStatus(str, Enum):
+    """Job execution status."""
+
+    QUEUED = "queued"
+    IN_PROGRESS = "in_progress"
+    COMPLETED = "completed"
+    FAILED = "failed"
+    ERROR = "error"
+
+
+class ErrorDetail(BaseModel):
+    """Error detail schema."""
+
+    error: str
+    code: str | None = None
+    details: dict[str, Any] | None = None
+
+
+class ComponentOutput(BaseModel):
+    """Component output schema."""
+
+    type: str = Field(..., description="Type of the component output (e.g., 'message', 'data', 'tool', 'text')")
+    component_id: str
+    status: JobStatus
+    content: Any | None = None
+    metadata: dict[str, Any] | None = None
+
+
+class GlobalInputs(BaseModel):
+    """Global inputs that apply to all input components in the workflow."""
+
+    input_value: str | None = Field(None, description="The input value to send to input components")
+    input_type: str = Field("chat", description="The type of input (chat, text, etc.)")
+    session_id: str | None = Field(None, description="Session ID for conversation continuity")
+
+
+class WorkflowExecutionRequest(BaseModel):
+    """Request schema for workflow execution."""
+
+    background: bool = False
+    stream: bool = False
+    flow_id: str
+    inputs: dict[str, Any] | None = Field(
+        None, description="Inputs with 'global' key for global inputs and component IDs for component-specific tweaks"
+    )
+
+    model_config = ConfigDict(
+        json_schema_extra={
+            "examples": [
+                {
+                    "background": False,
+                    "stream": False,
+                    "flow_id": "flow_67ccd2be17f0819081ff3bb2cf6508e60bb6a6b452d3795b",
+                    "inputs": {
+                        "global": {
+                            "input_value": "Hello, how can you help me today?",
+                            "input_type": "chat",
+                            "session_id": "session-123",
+                        },
+                        "llm_component": {"temperature": 0.7, "max_tokens": 100},
+                        "opensearch_component": {"opensearch_url": "https://opensearch:9200"},
+                    },
+                },
+                {
+                    "background": True,
+                    "stream": False,
+                    "flow_id": "flow_67ccd2be17f0819081ff3bb2cf6508e60bb6a6b452d3795b",
+                    "inputs": {"global": {"input_value": "Process this in the background", "input_type": "text"}},
+                },
+                {
+                    "background": False,
+                    "stream": True,
+                    "flow_id": "flow_67ccd2be17f0819081ff3bb2cf6508e60bb6a6b452d3795b",
+                    "inputs": {"chat_component": {"text": "Stream this conversation"}},
+                },
+            ]
+        },
+        extra="forbid",
+    )
+
+
+class WorkflowExecutionResponse(BaseModel):
+    """Synchronous workflow execution response."""
+
+    flow_id: str
+    job_id: str
+    object: Literal["response"] = "response"
+    created_timestamp: str
+    status: JobStatus
+    errors: list[ErrorDetail] = []
+    inputs: dict[str, Any] = {}
+    outputs: dict[str, ComponentOutput] = {}
+    metadata: dict[str, Any] = {}
+
+
+class WorkflowJobResponse(BaseModel):
+    """Background job response."""
+
+    job_id: str
+    created_timestamp: str
+    status: JobStatus
+    errors: list[ErrorDetail] = []
+
+
+class WorkflowStreamEvent(BaseModel):
+    """Streaming event response."""
+
+    type: str
+    run_id: str
+    timestamp: int
+    raw_event: dict[str, Any]
+
+
+class WorkflowStopRequest(BaseModel):
+    """Request schema for stopping workflow."""
+
+    job_id: str
+    force: bool = Field(default=False, description="Force stop the workflow")
+
+
+class WorkflowStopResponse(BaseModel):
+    """Response schema for stopping workflow."""
+
+    job_id: str
+    status: Literal["stopped", "stopping", "not_found", "error"]
+    message: str
+
+
+# OpenAPI response definitions
+WORKFLOW_EXECUTION_RESPONSES = {
+    200: {
+        "description": "Workflow execution response",
+        "content": {
+            "application/json": {
+                "schema": {
+                    "oneOf": [
+                        WorkflowExecutionResponse.model_json_schema(),
+                        WorkflowJobResponse.model_json_schema(),
+                    ]
+                }
+            },
+            "text/event-stream": {
+                "schema": WorkflowStreamEvent.model_json_schema(),
+                "description": "Server-sent events for streaming execution",
+            },
+        },
+    }
+}
+
+WORKFLOW_STATUS_RESPONSES = {
+    200: {
+        "description": "Workflow status response",
+        "content": {
+            "application/json": {"schema": WorkflowExecutionResponse.model_json_schema()},
+            "text/event-stream": {
+                "schema": WorkflowStreamEvent.model_json_schema(),
+                "description": "Server-sent events for streaming status",
+            },
+        },
+    }
+}
lfx/services/deps.py
CHANGED
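The hunks below expose the new transaction service through the service registry. A minimal sketch of a caller; in standalone lfx the accessor can return None or a disabled no-op service, so both cases are guarded (record_build itself is a hypothetical helper, not part of this diff):

from lfx.services.deps import get_transaction_service


async def record_build(flow_id: str, vertex_id: str, outputs: dict) -> None:
    """Hypothetical caller that logs a successful vertex build if a service is available."""
    service = get_transaction_service()
    if service is None or not service.is_enabled():
        return
    await service.log_transaction(
        flow_id=flow_id,
        vertex_id=vertex_id,
        inputs=None,
        outputs=outputs,
        status="success",
    )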
@@ -24,6 +24,7 @@ if TYPE_CHECKING:
         SettingsServiceProtocol,
         StorageServiceProtocol,
         TracingServiceProtocol,
+        TransactionServiceProtocol,
         VariableServiceProtocol,
     )
 
@@ -118,6 +119,17 @@ def get_tracing_service() -> TracingServiceProtocol | None:
     return get_service(ServiceType.TRACING_SERVICE)
 
 
+def get_transaction_service() -> TransactionServiceProtocol | None:
+    """Retrieves the transaction service instance.
+
+    Returns the transaction service for logging component executions.
+    Returns None if no transaction service is registered.
+    """
+    from lfx.services.schema import ServiceType
+
+    return get_service(ServiceType.TRANSACTION_SERVICE)
+
+
 async def get_session():
     msg = "get_session is deprecated, use session_scope instead"
     logger.warning(msg)
lfx/services/interfaces.py
CHANGED
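The protocol added below is decorated with @runtime_checkable, so any object exposing matching log_transaction and is_enabled methods passes a structural isinstance check. A hypothetical in-memory implementation (not part of this diff) to illustrate the contract:

from typing import Any

from lfx.services.interfaces import TransactionServiceProtocol


class InMemoryTransactionService:
    """Hypothetical implementation that keeps transaction records in a list."""

    def __init__(self) -> None:
        self.records: list[dict[str, Any]] = []

    async def log_transaction(
        self,
        flow_id: str,
        vertex_id: str,
        inputs: dict[str, Any] | None,
        outputs: dict[str, Any] | None,
        status: str,
        target_id: str | None = None,
        error: str | None = None,
    ) -> None:
        self.records.append({"flow_id": flow_id, "vertex_id": vertex_id, "status": status, "error": error})

    def is_enabled(self) -> bool:
        return True


# runtime_checkable protocols check method presence, so this structural check passes.
assert isinstance(InMemoryTransactionService(), TransactionServiceProtocol)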
@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 from abc import abstractmethod
-from typing import TYPE_CHECKING, Any, Protocol
+from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable
 
 if TYPE_CHECKING:
     import asyncio
@@ -106,3 +106,45 @@ class TracingServiceProtocol(Protocol):
     def log(self, message: str, **kwargs) -> None:
         """Log tracing information."""
         ...
+
+
+@runtime_checkable
+class TransactionServiceProtocol(Protocol):
+    """Protocol for transaction logging service.
+
+    This service handles logging of component execution transactions,
+    tracking inputs, outputs, and status of each vertex build.
+    """
+
+    @abstractmethod
+    async def log_transaction(
+        self,
+        flow_id: str,
+        vertex_id: str,
+        inputs: dict[str, Any] | None,
+        outputs: dict[str, Any] | None,
+        status: str,
+        target_id: str | None = None,
+        error: str | None = None,
+    ) -> None:
+        """Log a transaction record for a vertex execution.
+
+        Args:
+            flow_id: The flow ID (as string)
+            vertex_id: The vertex/component ID
+            inputs: Input parameters for the component
+            outputs: Output results from the component
+            status: Execution status (success/error)
+            target_id: Optional target vertex ID
+            error: Optional error message
+        """
+        ...
+
+    @abstractmethod
+    def is_enabled(self) -> bool:
+        """Check if transaction logging is enabled.
+
+        Returns:
+            True if transaction logging is enabled, False otherwise.
+        """
+        ...
lfx/services/schema.py
CHANGED
lfx/services/settings/auth.py
CHANGED
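The hunks below add a JWTAlgorithm enum, PRIVATE_KEY/PUBLIC_KEY settings, and a model validator that generates or loads an RSA key pair when an asymmetric algorithm is selected. A sketch of what that enables, assuming CONFIG_DIR is the only required field and using PyJWT for the signing step (PyJWT is not part of this diff; with an empty CONFIG_DIR the keys are generated in memory):

import jwt  # PyJWT, used here only to illustrate RS256 signing

from lfx.services.settings.auth import AuthSettings, JWTAlgorithm

auth = AuthSettings(CONFIG_DIR="", ALGORITHM=JWTAlgorithm.RS256)
assert auth.ALGORITHM.is_asymmetric()
assert auth.PUBLIC_KEY.startswith("-----BEGIN PUBLIC KEY-----")

token = jwt.encode({"sub": "user-123"}, auth.PRIVATE_KEY.get_secret_value(), algorithm="RS256")
claims = jwt.decode(token, auth.PUBLIC_KEY, algorithms=["RS256"])
assert claims["sub"] == "user-123"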
@@ -1,14 +1,33 @@
 import secrets
+from enum import Enum
 from pathlib import Path
 from typing import Literal
 
 from passlib.context import CryptContext
-from pydantic import Field, SecretStr, field_validator
+from pydantic import Field, SecretStr, field_validator, model_validator
 from pydantic_settings import BaseSettings, SettingsConfigDict
 
 from lfx.log.logger import logger
 from lfx.services.settings.constants import DEFAULT_SUPERUSER, DEFAULT_SUPERUSER_PASSWORD
-from lfx.services.settings.utils import
+from lfx.services.settings.utils import (
+    derive_public_key_from_private,
+    generate_rsa_key_pair,
+    read_secret_from_file,
+    write_public_key_to_file,
+    write_secret_to_file,
+)
+
+
+class JWTAlgorithm(str, Enum):
+    """JWT signing algorithm options."""
+
+    HS256 = "HS256"
+    RS256 = "RS256"
+    RS512 = "RS512"
+
+    def is_asymmetric(self) -> bool:
+        """Return True if this algorithm uses asymmetric (public/private key) cryptography."""
+        return self in (JWTAlgorithm.RS256, JWTAlgorithm.RS512)
 
 
 class AuthSettings(BaseSettings):
@@ -16,10 +35,22 @@ class AuthSettings(BaseSettings):
     CONFIG_DIR: str
     SECRET_KEY: SecretStr = Field(
         default=SecretStr(""),
-        description="Secret key for JWT. If not provided, a random one will be generated.",
+        description="Secret key for JWT (used with HS256). If not provided, a random one will be generated.",
+        frozen=False,
+    )
+    PRIVATE_KEY: SecretStr = Field(
+        default=SecretStr(""),
+        description="RSA private key for JWT signing (RS256/RS512). Auto-generated if not provided.",
         frozen=False,
     )
-
+    PUBLIC_KEY: str = Field(
+        default="",
+        description="RSA public key for JWT verification (RS256/RS512). Derived from private key if not provided.",
+    )
+    ALGORITHM: JWTAlgorithm = Field(
+        default=JWTAlgorithm.HS256,
+        description="JWT signing algorithm. Use RS256 or RS512 for asymmetric signing (recommended for production).",
+    )
     ACCESS_TOKEN_EXPIRE_SECONDS: int = 60 * 60  # 1 hour
     REFRESH_TOKEN_EXPIRE_SECONDS: int = 60 * 60 * 24 * 7  # 7 days
 
@@ -138,3 +169,63 @@
         logger.debug("Saved secret key")
 
         return value if isinstance(value, SecretStr) else SecretStr(value).get_secret_value()
+
+    @model_validator(mode="after")
+    def setup_rsa_keys(self):
+        """Generate or load RSA keys when using RS256/RS512 algorithm."""
+        if not self.ALGORITHM.is_asymmetric():
+            return self
+
+        config_dir = self.CONFIG_DIR
+        private_key_value = self.PRIVATE_KEY.get_secret_value() if self.PRIVATE_KEY else ""
+
+        if not config_dir:
+            # No config dir - generate keys in memory if not provided
+            if not private_key_value:
+                logger.debug("No CONFIG_DIR provided, generating RSA keys in memory")
+                private_key_pem, public_key_pem = generate_rsa_key_pair()
+                object.__setattr__(self, "PRIVATE_KEY", SecretStr(private_key_pem))
+                object.__setattr__(self, "PUBLIC_KEY", public_key_pem)
+            elif not self.PUBLIC_KEY:
+                # Derive public key from private key
+                public_key_pem = derive_public_key_from_private(private_key_value)
+                object.__setattr__(self, "PUBLIC_KEY", public_key_pem)
+            return self
+
+        private_key_path = Path(config_dir) / "private_key.pem"
+        public_key_path = Path(config_dir) / "public_key.pem"
+
+        if private_key_value:
+            # Private key provided via env var - save it and derive public key
+            logger.debug("RSA private key provided")
+            write_secret_to_file(private_key_path, private_key_value)
+
+            if not self.PUBLIC_KEY:
+                public_key_pem = derive_public_key_from_private(private_key_value)
+                object.__setattr__(self, "PUBLIC_KEY", public_key_pem)
+                write_public_key_to_file(public_key_path, public_key_pem)
+        # No private key provided - load from file or generate
+        elif private_key_path.exists():
+            logger.debug("Loading RSA keys from files")
+            private_key_pem = read_secret_from_file(private_key_path)
+            object.__setattr__(self, "PRIVATE_KEY", SecretStr(private_key_pem))
+
+            if public_key_path.exists():
+                public_key_pem = public_key_path.read_text(encoding="utf-8")
+                object.__setattr__(self, "PUBLIC_KEY", public_key_pem)
+            else:
+                # Derive public key from private key
+                public_key_pem = derive_public_key_from_private(private_key_pem)
+                object.__setattr__(self, "PUBLIC_KEY", public_key_pem)
+                write_public_key_to_file(public_key_path, public_key_pem)
+        else:
+            # Generate new RSA key pair
+            logger.debug("Generating new RSA key pair")
+            private_key_pem, public_key_pem = generate_rsa_key_pair()
+            write_secret_to_file(private_key_path, private_key_pem)
+            write_public_key_to_file(public_key_path, public_key_pem)
+            object.__setattr__(self, "PRIVATE_KEY", SecretStr(private_key_pem))
+            object.__setattr__(self, "PUBLIC_KEY", public_key_pem)
+            logger.debug("RSA key pair generated and saved")
+
+        return self
lfx/services/settings/base.py
CHANGED
@@ -313,6 +313,10 @@ class Settings(BaseSettings):
     """If set to True, Langflow will start the agentic MCP server that provides tools for
     flow/component operations, template search, and graph visualization."""
 
+    # Developer API
+    developer_api_enabled: bool = False
+    """If set to True, Langflow will enable developer API endpoints for advanced debugging and introspection."""
+
     # Public Flow Settings
     public_flow_cleanup_interval: int = Field(default=3600, gt=600)
     """The interval in seconds at which public temporary flows will be cleaned up.
lfx/services/settings/utils.py
CHANGED
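The helpers added below wrap the cryptography package for RSA key generation and public-key derivation, plus a writer that relaxes file permissions for the public key. A short sketch of the round trip (file paths are illustrative):

from pathlib import Path

from lfx.services.settings.utils import (
    derive_public_key_from_private,
    generate_rsa_key_pair,
    write_public_key_to_file,
    write_secret_to_file,
)

private_pem, public_pem = generate_rsa_key_pair()
assert private_pem.startswith("-----BEGIN PRIVATE KEY-----")  # PKCS#8
assert public_pem.startswith("-----BEGIN PUBLIC KEY-----")    # SubjectPublicKeyInfo

# The public key is derived deterministically, so it can always be recomputed from the private key.
assert derive_public_key_from_private(private_pem) == public_pem

write_secret_to_file(Path("/tmp/private_key.pem"), private_pem)
# The public key file is written with 0o644 on Linux/macOS.
write_public_key_to_file(Path("/tmp/public_key.pem"), public_pem)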
@@ -1,9 +1,74 @@
 import platform
 from pathlib import Path
 
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import rsa
+from cryptography.hazmat.primitives.serialization import load_pem_private_key
+
 from lfx.log.logger import logger
 
 
+class RSAKeyError(Exception):
+    """Exception raised when RSA key operations fail."""
+
+
+def derive_public_key_from_private(private_key_pem: str) -> str:
+    """Derive a public key from a private key PEM string.
+
+    Args:
+        private_key_pem: The private key in PEM format.
+
+    Returns:
+        str: The public key in PEM format.
+
+    Raises:
+        RSAKeyError: If the private key is invalid or cannot be processed.
+    """
+    try:
+        private_key = load_pem_private_key(private_key_pem.encode(), password=None)
+        return (
+            private_key.public_key()
+            .public_bytes(
+                encoding=serialization.Encoding.PEM,
+                format=serialization.PublicFormat.SubjectPublicKeyInfo,
+            )
+            .decode("utf-8")
+        )
+    except Exception as e:
+        msg = f"Failed to derive public key from private key: {e}"
+        logger.error(msg)
+        raise RSAKeyError(msg) from e
+
+
+def generate_rsa_key_pair() -> tuple[str, str]:
+    """Generate an RSA key pair for RS256 JWT signing.
+
+    Returns:
+        tuple[str, str]: A tuple of (private_key_pem, public_key_pem) as strings.
+    """
+    private_key = rsa.generate_private_key(
+        public_exponent=65537,
+        key_size=2048,
+    )
+
+    private_key_pem = private_key.private_bytes(
+        encoding=serialization.Encoding.PEM,
+        format=serialization.PrivateFormat.PKCS8,
+        encryption_algorithm=serialization.NoEncryption(),
+    ).decode("utf-8")
+
+    public_key_pem = (
+        private_key.public_key()
+        .public_bytes(
+            encoding=serialization.Encoding.PEM,
+            format=serialization.PublicFormat.SubjectPublicKeyInfo,
+        )
+        .decode("utf-8")
+    )
+
+    return private_key_pem, public_key_pem
+
+
 def set_secure_permissions(file_path: Path) -> None:
     if platform.system() in {"Linux", "Darwin"}:  # Unix/Linux/Mac
         file_path.chmod(0o600)
@@ -38,3 +103,20 @@ def write_secret_to_file(path: Path, value: str) -> None:
 
 def read_secret_from_file(path: Path) -> str:
     return path.read_text(encoding="utf-8")
+
+
+def write_public_key_to_file(path: Path, value: str) -> None:
+    """Write a public key to file with appropriate permissions (0o644).
+
+    Public keys can be readable by others but should only be writable by owner.
+
+    Args:
+        path: The file path to write to.
+        value: The public key content.
+    """
+    path.write_text(value, encoding="utf-8")
+    try:
+        if platform.system() in {"Linux", "Darwin"}:
+            path.chmod(0o644)
+    except Exception:  # noqa: BLE001
+        logger.exception("Failed to set permissions on public key file")
lfx/services/transaction/service.py
ADDED
@@ -0,0 +1,35 @@
+"""Transaction service implementations for lfx."""
+
+from __future__ import annotations
+
+from typing import Any
+
+from lfx.services.interfaces import TransactionServiceProtocol
+
+
+class NoopTransactionService(TransactionServiceProtocol):
+    """No-operation transaction service for standalone lfx mode.
+
+    This service is used when lfx runs without a concrete transaction
+    service implementation (e.g., without langflow). All operations
+    are no-ops and transaction logging is disabled.
+    """
+
+    async def log_transaction(
+        self,
+        flow_id: str,
+        vertex_id: str,
+        inputs: dict[str, Any] | None,
+        outputs: dict[str, Any] | None,
+        status: str,
+        target_id: str | None = None,
+        error: str | None = None,
+    ) -> None:
+        """No-op implementation of transaction logging.
+
+        In standalone mode, transactions are not persisted.
+        """
+
+    def is_enabled(self) -> bool:
+        """Transaction logging is disabled in noop mode."""
+        return False
File without changes