ibm-watsonx-orchestrate 1.12.0b1__py3-none-any.whl → 1.13.0b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ibm_watsonx_orchestrate/__init__.py +2 -1
- ibm_watsonx_orchestrate/agent_builder/connections/types.py +34 -3
- ibm_watsonx_orchestrate/agent_builder/knowledge_bases/types.py +11 -2
- ibm_watsonx_orchestrate/agent_builder/models/types.py +17 -1
- ibm_watsonx_orchestrate/agent_builder/toolkits/types.py +14 -2
- ibm_watsonx_orchestrate/agent_builder/tools/__init__.py +1 -1
- ibm_watsonx_orchestrate/agent_builder/tools/langflow_tool.py +61 -1
- ibm_watsonx_orchestrate/agent_builder/tools/types.py +21 -3
- ibm_watsonx_orchestrate/agent_builder/voice_configurations/__init__.py +1 -1
- ibm_watsonx_orchestrate/agent_builder/voice_configurations/types.py +11 -0
- ibm_watsonx_orchestrate/cli/commands/agents/agents_controller.py +27 -51
- ibm_watsonx_orchestrate/cli/commands/connections/connections_command.py +2 -2
- ibm_watsonx_orchestrate/cli/commands/connections/connections_controller.py +54 -28
- ibm_watsonx_orchestrate/cli/commands/copilot/copilot_command.py +25 -2
- ibm_watsonx_orchestrate/cli/commands/copilot/copilot_controller.py +249 -14
- ibm_watsonx_orchestrate/cli/commands/copilot/copilot_server_controller.py +4 -4
- ibm_watsonx_orchestrate/cli/commands/environment/environment_command.py +5 -1
- ibm_watsonx_orchestrate/cli/commands/environment/environment_controller.py +6 -3
- ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_command.py +3 -2
- ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_controller.py +1 -1
- ibm_watsonx_orchestrate/cli/commands/knowledge_bases/knowledge_bases_controller.py +45 -16
- ibm_watsonx_orchestrate/cli/commands/models/models_command.py +2 -2
- ibm_watsonx_orchestrate/cli/commands/models/models_controller.py +29 -10
- ibm_watsonx_orchestrate/cli/commands/partners/offering/partners_offering_controller.py +21 -4
- ibm_watsonx_orchestrate/cli/commands/partners/offering/types.py +7 -15
- ibm_watsonx_orchestrate/cli/commands/server/server_command.py +19 -17
- ibm_watsonx_orchestrate/cli/commands/toolkit/toolkit_controller.py +139 -27
- ibm_watsonx_orchestrate/cli/commands/tools/tools_command.py +2 -2
- ibm_watsonx_orchestrate/cli/commands/tools/tools_controller.py +79 -36
- ibm_watsonx_orchestrate/cli/commands/voice_configurations/voice_configurations_controller.py +23 -11
- ibm_watsonx_orchestrate/cli/common.py +26 -0
- ibm_watsonx_orchestrate/cli/config.py +33 -2
- ibm_watsonx_orchestrate/client/connections/connections_client.py +1 -14
- ibm_watsonx_orchestrate/client/copilot/cpe/copilot_cpe_client.py +34 -1
- ibm_watsonx_orchestrate/client/knowledge_bases/knowledge_base_client.py +6 -2
- ibm_watsonx_orchestrate/client/model_policies/model_policies_client.py +1 -1
- ibm_watsonx_orchestrate/client/models/models_client.py +1 -1
- ibm_watsonx_orchestrate/client/threads/threads_client.py +34 -0
- ibm_watsonx_orchestrate/client/utils.py +29 -7
- ibm_watsonx_orchestrate/docker/compose-lite.yml +2 -2
- ibm_watsonx_orchestrate/docker/default.env +15 -9
- ibm_watsonx_orchestrate/flow_builder/flows/decorators.py +2 -0
- ibm_watsonx_orchestrate/flow_builder/flows/flow.py +59 -9
- ibm_watsonx_orchestrate/flow_builder/node.py +13 -1
- ibm_watsonx_orchestrate/flow_builder/types.py +39 -0
- ibm_watsonx_orchestrate/langflow/__init__.py +0 -0
- ibm_watsonx_orchestrate/langflow/langflow_utils.py +195 -0
- ibm_watsonx_orchestrate/langflow/lfx_deps.py +84 -0
- ibm_watsonx_orchestrate/utils/async_helpers.py +31 -0
- ibm_watsonx_orchestrate/utils/docker_utils.py +1177 -33
- ibm_watsonx_orchestrate/utils/environment.py +165 -20
- ibm_watsonx_orchestrate/utils/exceptions.py +1 -1
- ibm_watsonx_orchestrate/utils/tokens.py +51 -0
- ibm_watsonx_orchestrate/utils/utils.py +63 -4
- {ibm_watsonx_orchestrate-1.12.0b1.dist-info → ibm_watsonx_orchestrate-1.13.0b0.dist-info}/METADATA +2 -2
- {ibm_watsonx_orchestrate-1.12.0b1.dist-info → ibm_watsonx_orchestrate-1.13.0b0.dist-info}/RECORD +59 -52
- {ibm_watsonx_orchestrate-1.12.0b1.dist-info → ibm_watsonx_orchestrate-1.13.0b0.dist-info}/WHEEL +0 -0
- {ibm_watsonx_orchestrate-1.12.0b1.dist-info → ibm_watsonx_orchestrate-1.13.0b0.dist-info}/entry_points.txt +0 -0
- {ibm_watsonx_orchestrate-1.12.0b1.dist-info → ibm_watsonx_orchestrate-1.13.0b0.dist-info}/licenses/LICENSE +0 -0
ibm_watsonx_orchestrate/docker/default.env

@@ -58,16 +58,16 @@ REGISTRY_URL=
 
 
 
-SERVER_TAG=
+SERVER_TAG=24-09-2025-17d7c67
 SERVER_REGISTRY=
 
-WORKER_TAG=
+WORKER_TAG=24-09-2025-17d7c67
 WORKER_REGISTRY=
 
 AI_GATEWAY_TAG=20-08-2025-9ed6d40
 AI_GATEWAY_REGISTRY=
 
-AGENT_GATEWAY_TAG=
+AGENT_GATEWAY_TAG=19-10-2025-e48ad3a
 AGENT_GATEWAY_REGISTRY=
 
 DB_REGISTRY=
@@ -78,7 +78,7 @@ AMDDBTAG=17-09-2025-8a9aff2
 ARM64DBTAG=17-09-2025-8a9aff2
 
 UI_REGISTRY=
-UITAG=
+UITAG=22-09-2025-e35f498
 
 CM_REGISTRY=
 CM_TAG=16-09-2025-e33b344
@@ -86,16 +86,16 @@ CM_TAG=16-09-2025-e33b344
 CONNECTIONS_UI_REGISTRY=
 CONNECTIONS_UI_TAG=15-09-2025-98aa9da
 
-TRM_TAG=
+TRM_TAG=16-09-2025-cb6b9df
 TRM_REGISTRY=
 
-TR_TAG=
+TR_TAG=24-09-2025-a515038
 TR_REGISTRY=
 
-BUILDER_TAG=
+BUILDER_TAG=24-09-2025-f9b68d8
 BUILDER_REGISTRY=
 
-FLOW_RUNTIME_TAG=
+FLOW_RUNTIME_TAG=22-09-2025-0bd3f58
 FLOW_RUMTIME_REGISTRY=
 
 
@@ -108,7 +108,7 @@ JAEGER_PROXY_REGISTRY=
 SOCKET_HANDLER_TAG=29-05-2025
 SOCKET_HANDLER_REGISTRY=
 
-CPE_TAG=
+CPE_TAG=24-09-2025-2be3101
 CPE_REGISTRY=
 
 VOICE_CONTROLLER_TAG=12-09-2025-0e04772
@@ -127,6 +127,12 @@ DOCPROC_CACHE_TAG=20250916-master-86-454157f
 DOCPROC_DPI_TAG=20250910-214413-288-a348dfd9
 DOCPROC_REGISTRY=
 
+ETCD_TAG=
+ETCD_REGISTRY=
+
+ELASTICSEARCH_TAG=
+ELASTICSEARCH_REGISTRY=
+
 # END -- IMAGE REGISTRIES AND TAGS
 
 CPD_VERIFY=true
ibm_watsonx_orchestrate/flow_builder/flows/decorators.py

@@ -35,6 +35,7 @@ def flow(*args,
          description: str|None=None,
          input_schema: type[BaseModel] | None = None,
          output_schema: type[BaseModel] | None = None,
+         private_schema: type[BaseModel] | None = None,
          initiators: Sequence[str] = (),
          schedulable: bool = False,
          llm_model: str|ListVirtualModel|None=None,
@@ -63,6 +64,7 @@ def flow(*args,
         description = node_spec.description,
         input_schema = input_schema,
         output_schema = output_schema,
+        private_schema = private_schema,
         initiators = initiators,
         schedulable = schedulable,
         llm_model = llm_model,
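The `flow` decorator (and, further down, `FlowFactory.create_flow`) now accepts a `private_schema` alongside `input_schema` and `output_schema`, so a flow can declare a Pydantic model for state it keeps to itself. A minimal sketch of how this might look; the import path, the decorated-function shape, and all model names below are assumptions for illustration, not taken from this diff:

```python
from pydantic import BaseModel

# Import path assumed; adjust to the flow-builder API you actually use.
from ibm_watsonx_orchestrate.flow_builder.flows import Flow, flow


class OrderRequest(BaseModel):        # hypothetical flow input
    order_id: str

class OrderStatus(BaseModel):         # hypothetical flow output
    status: str

class FlowScratchpad(BaseModel):      # hypothetical internal-only state
    attempts: int = 0


@flow(
    description="Look up the status of an order",
    input_schema=OrderRequest,
    output_schema=OrderStatus,
    private_schema=FlowScratchpad,    # new keyword in 1.13.0b0
)
def build_order_status_flow(aflow: Flow = None) -> Flow:
    # nodes would be added to `aflow` here
    return aflow
```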
ibm_watsonx_orchestrate/flow_builder/flows/flow.py

@@ -26,13 +26,15 @@ from ibm_watsonx_orchestrate.client.tools.tool_client import ToolClient
 from ibm_watsonx_orchestrate.client.tools.tempus_client import TempusClient
 from ibm_watsonx_orchestrate.client.utils import instantiate_client
 from ..types import (
-    DocProcKVPSchema, Assignment, Conditions, EndNodeSpec, Expression, ForeachPolicy, ForeachSpec, LoopSpec, BranchNodeSpec, MatchPolicy,
+    DocProcKVPSchema, Assignment, Conditions, EndNodeSpec, Expression, ForeachPolicy, ForeachSpec, LoopSpec, BranchNodeSpec, MatchPolicy,
+    NodeIdCondition, PlainTextReadingOrder, PromptExample, PromptLLMParameters, PromptNodeSpec, ScriptNodeSpec, TimerNodeSpec,
+    NodeErrorHandlerConfig, NodeIdCondition, PlainTextReadingOrder, PromptExample, PromptLLMParameters, PromptNodeSpec,
     StartNodeSpec, ToolSpec, JsonSchemaObject, ToolRequestBody, ToolResponseBody, UserFieldKind, UserFieldOption, UserFlowSpec, UserNodeSpec, WaitPolicy,
     DocProcSpec, TextExtractionResponse, DocProcInput, DecisionsNodeSpec, DecisionsRule, DocExtSpec, File, DocumentClassificationResponse, DocClassifierSpec, DocumentProcessingCommonInput
 )
 from .constants import CURRENT_USER, START, END, ANY_USER
 from ..node import (
-    EndNode, Node, PromptNode, StartNode, UserNode, AgentNode, DataMap, ToolNode, DocProcNode, DecisionsNode, DocExtNode, DocClassifierNode
+    EndNode, Node, PromptNode, ScriptNode, StartNode, UserNode, AgentNode, DataMap, ToolNode, DocProcNode, DecisionsNode, DocExtNode, DocClassifierNode
 )
 from ..types import (
     AgentNodeSpec, extract_node_spec, FlowContext, FlowEventType, FlowEvent, FlowSpec,
@@ -269,7 +271,8 @@ class Flow(Node):
 
     def _create_node_from_tool_fn(
             self,
-            tool: Callable
+            tool: Callable,
+            error_handler_config: Optional[NodeErrorHandlerConfig] = None
     ) -> ToolNode:
         if not isinstance(tool, Callable):
             raise ValueError("Only functions with @tool decorator can be added.")
@@ -292,7 +295,8 @@ class Flow(Node):
             input_schema = tool_spec.input_schema,
             output_schema = tool_spec.output_schema,
             output_schema_object = spec.output_schema_object,
-            tool = tool_spec.name
+            tool = tool_spec.name,
+            error_handler_config = error_handler_config,)
 
         return ToolNode(spec=toolnode_spec)
 
@@ -302,14 +306,18 @@ class Flow(Node):
             name: str | None = None,
             display_name: str | None = None,
             description: str | None = None,
-
             input_schema: type[BaseModel] | None = None,
             output_schema: type[BaseModel] | None = None,
-            input_map: DataMap = None
+            input_map: DataMap = None,
+            error_handler_config: NodeErrorHandlerConfig | None = None
     ) -> ToolNode:
         '''create a tool node in the flow'''
         if tool is None:
             raise ValueError("tool must be provided")
+
+
+        if isinstance(error_handler_config, dict):
+            error_handler_config = NodeErrorHandlerConfig.model_validate(error_handler_config)
 
         if isinstance(tool, str):
             name = name if name is not None and name != "" else tool
@@ -338,14 +346,16 @@ class Flow(Node):
                 input_schema= _get_tool_request_body(input_schema_obj),
                 output_schema= _get_tool_response_body(output_schema_obj),
                 output_schema_object = output_schema_obj,
-                tool = tool
+                tool = tool,
+                error_handler_config = error_handler_config
+            )
 
             node = ToolNode(spec=toolnode_spec)
         elif isinstance(tool, PythonTool):
             if callable(tool):
                 tool_spec = getattr(tool, "__tool_spec__", None)
                 if tool_spec:
-                    node = self._create_node_from_tool_fn(tool)
+                    node = self._create_node_from_tool_fn(tool, error_handler_config = error_handler_config)
                 else:
                     raise ValueError("Only functions with @tool decorator can be added.")
         else:
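The two hunks above thread the new `error_handler_config` argument through tool-node creation: it is accepted either as a `NodeErrorHandlerConfig` instance or as a plain dict, which is normalized with `model_validate`. A hedged sketch, assuming the node-creation method is `Flow.tool(...)`, that `aflow` is a flow under construction, and that `lookup_order` is an `@tool`-decorated function:

```python
# Import path assumed from the relative imports in this diff (flow_builder/types.py).
from ibm_watsonx_orchestrate.flow_builder.types import NodeErrorHandlerConfig

retry_policy = NodeErrorHandlerConfig(
    error_message="Order lookup failed after retries",  # hypothetical message
    max_retries=3,
    retry_interval=10,  # units are not stated in this diff
)

# Attach the policy when creating the tool node.
order_node = aflow.tool(lookup_order, error_handler_config=retry_policy)

# A plain dict is also accepted and validated into NodeErrorHandlerConfig:
# order_node = aflow.tool(lookup_order, error_handler_config={"max_retries": 3})
```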
@@ -357,6 +367,41 @@ class Flow(Node):
 
         node = self._add_node(node)
         return cast(ToolNode, node)
+
+
+    def script(
+            self,
+            script: str | None = "",
+            name: str | None = None,
+            display_name: str | None = None,
+            description: str | None = None,
+            input_schema: type[BaseModel] | None = None,
+            output_schema: type[BaseModel] | None = None,
+            input_map: DataMap = None
+    ) -> ScriptNode:
+        '''create a script node in the flow'''
+        name = name if name is not None and name != "" else ""
+
+        input_schema_obj = _get_json_schema_obj("input", input_schema)
+        output_schema_obj = _get_json_schema_obj("output", output_schema)
+
+        script_node_spec = ScriptNodeSpec(
+            name = name,
+            display_name = display_name,
+            description = description,
+            input_schema= _get_tool_request_body(input_schema_obj),
+            output_schema= _get_tool_response_body(output_schema_obj),
+            output_schema_object = output_schema_obj,
+            fn = script)
+
+        node = ScriptNode(spec=script_node_spec)
+
+        # setup input and output map
+        if input_map:
+            node.input_map = self._get_data_map(input_map)
+
+        node = self._add_node(node)
+        return cast(ScriptNode, node)
 
 
     def _add_node(self, node: Node) -> Node:
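`Flow.script(...)` is new in this release: it wraps a string of code in a `ScriptNodeSpec` (stored in the spec's `fn` field) and adds the resulting `ScriptNode` to the flow. The diff does not show what language or shape the script string must take, so the snippet below is only a structural sketch with assumed schemas and an elided script body:

```python
from pydantic import BaseModel

class GreetingInput(BaseModel):    # hypothetical input schema
    name: str

class GreetingOutput(BaseModel):   # hypothetical output schema
    greeting: str

# `aflow` is assumed to be a Flow under construction.
script_node = aflow.script(
    script="...",                  # script body; expected format is not defined in this diff
    name="build_greeting",
    description="Builds a greeting for the caller",
    input_schema=GreetingInput,
    output_schema=GreetingOutput,
)
```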
@@ -423,7 +468,8 @@ class Flow(Node):
             description: str | None = None,
             input_schema: type[BaseModel]|None = None,
             output_schema: type[BaseModel]|None=None,
-            input_map: DataMap = None
+            input_map: DataMap = None,
+            error_handler_config: NodeErrorHandlerConfig | None = None,) -> PromptNode:
 
         if name is None:
             raise ValueError("name must be provided.")
@@ -442,6 +488,7 @@ class Flow(Node):
             prompt_examples=prompt_examples,
             llm=llm,
             llm_parameters=llm_parameters,
+            error_handler_config=error_handler_config,
             input_schema=_get_tool_request_body(input_schema_obj),
             output_schema=_get_tool_response_body(output_schema_obj),
             output_schema_object = output_schema_obj
@@ -1245,6 +1292,7 @@ class FlowFactory(BaseModel):
             initiators: Sequence[str]|None=None,
             input_schema: type[BaseModel]|None=None,
             output_schema: type[BaseModel]|None=None,
+            private_schema: type[BaseModel]|None=None,
             schedulable: bool=False,
             llm_model: str|ListVirtualModel|None=None,
             agent_conversation_memory_turns_limit: int|None = None) -> Flow:
@@ -1257,6 +1305,7 @@ class FlowFactory(BaseModel):
         input_schema_obj = _get_json_schema_obj(parameter_name = "input", type_def = input_schema)
         # create input spec
         output_schema_obj = _get_json_schema_obj("output", output_schema)
+        private_schema_obj = _get_json_schema_obj("private", private_schema)
         if initiators is None:
             initiators = []
 
@@ -1268,6 +1317,7 @@ class FlowFactory(BaseModel):
             initiators=initiators,
             input_schema=_get_tool_request_body(input_schema_obj),
             output_schema=_get_tool_response_body(output_schema_obj),
+            private_schema = private_schema_obj,
             output_schema_object = output_schema_obj,
             schedulable=schedulable,
         )
ibm_watsonx_orchestrate/flow_builder/node.py

@@ -6,7 +6,7 @@ import yaml
 from pydantic import BaseModel, Field, SerializeAsAny, create_model
 from enum import Enum
 
-from .types import Assignment, DocExtConfigField, EndNodeSpec, NodeSpec, AgentNodeSpec, PromptNodeSpec, TimerNodeSpec, StartNodeSpec, ToolNodeSpec, UserFieldKind, UserFieldOption, UserNodeSpec, DocProcSpec, \
+from .types import Assignment, DocExtConfigField, EndNodeSpec, NodeSpec, AgentNodeSpec, PromptNodeSpec, ScriptNodeSpec, TimerNodeSpec, StartNodeSpec, ToolNodeSpec, UserFieldKind, UserFieldOption, UserNodeSpec, DocProcSpec, \
     DocExtSpec, DocExtConfig, DocClassifierSpec, DecisionsNodeSpec, DocClassifierConfig
 
 from .data_map import DataMap
@@ -132,6 +132,18 @@ class ToolNode(Node):
 
     def get_spec(self) -> ToolNodeSpec:
         return cast(ToolNodeSpec, self.spec)
+
+
+class ScriptNode(Node):
+    def __repr__(self):
+        return f"ScriptNode(name='{self.spec.name}', description='{self.spec.description}')"
+
+    def get_spec(self) -> ScriptNodeSpec:
+        return cast(ScriptNodeSpec, self.spec)
+
+    def updateScript(self, script: str):
+        '''Update the script of a script node'''
+        self.spec.fn = script
 
 class UserNode(Node):
     def __repr__(self):
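The returned `ScriptNode` exposes `get_spec()` and `updateScript()`, so a script body can be swapped after the node exists. A small sketch continuing the hypothetical `script_node` from the earlier example:

```python
# updateScript simply overwrites ScriptNodeSpec.fn with the new string.
script_node.updateScript("...revised script body...")

print(repr(script_node))              # ScriptNode(name='build_greeting', description='...')
print(script_node.get_spec().fn)      # the updated script string
```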
ibm_watsonx_orchestrate/flow_builder/types.py

@@ -374,8 +374,24 @@ class EndNodeSpec(NodeSpec):
     def __init__(self, **data):
         super().__init__(**data)
         self.kind = "end"
+
+class NodeErrorHandlerConfig(BaseModel):
+    error_message: Optional[str] = None
+    max_retries: Optional[int] = None
+    retry_interval: Optional[int] = None
+
+    def to_json(self) -> dict[str, Any]:
+        model_spec = {}
+        if self.error_message:
+            model_spec["error_message"] = self.error_message
+        if self.max_retries:
+            model_spec["max_retries"] = self.max_retries
+        if self.retry_interval:
+            model_spec["retry_interval"] = self.retry_interval
+        return model_spec
 class ToolNodeSpec(NodeSpec):
     tool: Union[str, ToolSpec] = Field(default = None, description="the tool to use")
+    error_handler_config: Optional[NodeErrorHandlerConfig] = None
 
     def __init__(self, **data):
         super().__init__(**data)
@@ -383,12 +399,27 @@ class ToolNodeSpec(NodeSpec):
 
     def to_json(self) -> dict[str, Any]:
         model_spec = super().to_json()
+        if self.error_handler_config:
+            model_spec["error_handler_config"] = self.error_handler_config.to_json()
         if self.tool:
             if isinstance(self.tool, ToolSpec):
                 model_spec["tool"] = self.tool.model_dump(exclude_defaults=True, exclude_none=True, exclude_unset=True)
             else:
                 model_spec["tool"] = self.tool
         return model_spec
+
+class ScriptNodeSpec(NodeSpec):
+    fn: str = Field(default = None, description="the script to execute")
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        self.kind = "script"
+
+    def to_json(self) -> dict[str, Any]:
+        model_spec = super().to_json()
+        if self.fn:
+            model_spec["fn"] = self.fn
+        return model_spec
 
 
 class UserFieldValue(BaseModel):
@@ -719,6 +750,7 @@ class PromptNodeSpec(NodeSpec):
     prompt_examples: Optional[list[PromptExample]]
     llm: Optional[str]
     llm_parameters: Optional[PromptLLMParameters]
+    error_handler_config: Optional[NodeErrorHandlerConfig]
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -734,6 +766,8 @@ class PromptNodeSpec(NodeSpec):
             model_spec["llm"] = self.llm
         if self.llm_parameters:
             model_spec["llm_parameters"] = self.llm_parameters.to_json()
+        if self.error_handler_config:
+            model_spec["error_handler_config"] = self.error_handler_config.to_json()
         if self.prompt_examples:
             model_spec["prompt_examples"] = []
             for example in self.prompt_examples:
@@ -871,6 +905,9 @@ class FlowSpec(NodeSpec):
     initiators: Sequence[str] = [ANY_USER]
     schedulable: bool = False
 
+    # flow can have private schema
+    private_schema: JsonSchemaObject | SchemaRef | None = None
+
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
         self.kind = "flow"
@@ -879,6 +916,8 @@ class FlowSpec(NodeSpec):
         model_spec = super().to_json()
         if self.initiators:
             model_spec["initiators"] = self.initiators
+        if self.private_schema:
+            model_spec["private_schema"] = _to_json_from_json_schema(self.private_schema)
 
         model_spec["schedulable"] = self.schedulable
 
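Because each `to_json()` only emits the new keys when they are set, serialized specs gain `error_handler_config`, `fn`, and `private_schema` entries only for the nodes and flows that use them. Illustratively (key names from the diff; surrounding fields and values are hypothetical):

```python
tool_node_json = {
    # ...other ToolNodeSpec fields...
    "tool": "lookup_order",
    "error_handler_config": {"max_retries": 3, "retry_interval": 10},
}

flow_json = {
    # ...other FlowSpec fields...
    "schedulable": False,
    "private_schema": {"type": "object", "properties": {"attempts": {"type": "integer"}}},
}
```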
ibm_watsonx_orchestrate/langflow/__init__.py — new empty file (no content changes).
ibm_watsonx_orchestrate/langflow/langflow_utils.py — new file

@@ -0,0 +1,195 @@
+import logging
+import ast
+import sys
+from pathlib import Path
+import importlib.util
+
+from pydantic import BaseModel
+
+from .lfx_deps import LFX_DEPENDENCIES
+
+logger = logging.getLogger(__name__)
+
+class LangflowComponent(BaseModel):
+    id: str
+    name: str
+    credentials: dict
+    requirements: list[str] = []
+
+class LangflowModelSpec(BaseModel):
+    version: str
+    components: list[LangflowComponent]
+
+_MODULE_MAP = {
+    "mem0":"mem0ai",
+}
+
+import math
+from collections import Counter
+
+def _calculate_entropy(s):
+    """
+    Calculates the Shannon entropy of a string.
+
+    Parameters:
+    s (str): Input string.
+
+    Returns:
+    float: Shannon entropy value.
+    """
+    if not s:
+        return 0.0
+
+    freq = Counter(s)
+    length = len(s)
+
+    entropy = -sum((count / length) * math.log2(count / length) for count in freq.values())
+    return entropy
+
+def _mask_api_key(key):
+    """
+    Masks an API key by keeping the first 5 characters visible,
+    masking the rest with asterisks, and truncating the result to a maximum of 25 characters.
+
+    Parameters:
+    key (str): The API key string.
+
+    Returns:
+    str: Masked and truncated API key.
+    """
+    if not isinstance(key, str):
+        return key
+
+    # if this is a potential real API key -- mask it
+    if _calculate_entropy(key) > 4.1:
+        visible_part = key[:5]
+        masked_part = '*' * (len(key) - 5)
+        masked_key = visible_part + masked_part
+
+        return masked_key[:25]
+    elif len(key) > 25:
+        # if the key is longer than 25 characters, truncates it anyway
+        return key[:22] + '...'
+
+    return key
+
+def _extract_imports(source_code) -> list[str]:
+    tree = ast.parse(source_code)
+    imports = set()
+    for node in ast.walk(tree):
+        if isinstance(node, ast.Import):
+            for alias in node.names:
+                # we only need the module name, not sub-module
+                imports.add(alias.name.split('.')[0])
+        elif isinstance(node, ast.ImportFrom):
+            if node.module:
+                # we only need the module name, not sub-module
+                imports.add(node.module.split('.')[0])
+    return sorted(imports)
+
+
+
+def _is_builtin_module(module_name: str) -> bool:
+    underscore_module_name = f"_{module_name}"
+
+    # Check against the list of standard modules
+    if module_name in sys.stdlib_module_names:
+        return True
+
+    if underscore_module_name in sys.stdlib_module_names:
+        return True
+
+    # Check against the list of built-in module names
+    if module_name in sys.builtin_module_names:
+        return True
+
+    if underscore_module_name in sys.builtin_module_names:
+        return True
+
+    # Use importlib to find the module spec
+    spec = importlib.util.find_spec(module_name)
+    if spec is None:
+        return False  # Module not found
+
+    # Check if the loader is a BuiltinImporter
+    return isinstance(spec.loader, importlib.machinery.BuiltinImporter)
+
+
+def _find_missing_requirements(imported_modules, requirements_modules: list[str]) -> list[str]:
+    """
+    Compare imported modules with requirements.txt and return missing ones.
+
+    Parameters:
+    imported_modules (list): List of module names used in the code.
+    requirements_file_path (str): Path to the requirements.txt file.
+
+    Returns:
+    list: Modules that are imported but not listed in requirements.txt.
+    """
+    def normalize_module_name(name):
+        module_name = name.split('.')[0].lower()
+        # sometimes the module name in pipy is different than the real name
+        if module_name in _MODULE_MAP:
+            module_name = _MODULE_MAP[module_name]
+        return module_name
+
+    # Normalize imported module names
+    normalized_imports = [normalize_module_name(mod) for mod in imported_modules]
+
+    # filter out all built-ins
+    filtered_imports = [
+        module for module in normalized_imports
+        if _is_builtin_module(module) is False
+    ]
+
+    # Compare and find missing modules
+    missing_modules = [
+        module for module in filtered_imports
+        if module not in requirements_modules
+    ]
+
+    return missing_modules
+
+
+
+def parse_langflow_model(model) -> LangflowModelSpec:
+    """
+    Extracts component details and Langflow version from a Langflow JSON object.
+
+    Parameters:
+    model (dict): The Langflow JSON object.
+
+    Returns:
+    LangflowModelSpec: A LangflowModelSpec object containing the extracted version and component information.
+    """
+    version = model.get("last_tested_version", "Unknown")
+    components = []
+    data = model.get('data', {} )
+
+    # get the list of available modules
+    requirements_modules = LFX_DEPENDENCIES
+
+    for node in data.get("nodes", []):
+        node_data = node.get("data", {})
+        node_info = node_data.get("node", {})
+        template = node_info.get("template", {})
+        code = template.get("code")
+        credentials = {}
+
+        missing_imports = []
+        for field_name, field_info in template.items():
+            if isinstance(field_info, dict) and field_info.get("password", False) == True:
+                credentials[field_name] = _mask_api_key(field_info.get("value"))
+
+        if code and code.get("value") != None:
+            imports = _extract_imports(code.get("value"))
+            if len(imports) > 0:
+                missing_imports = _find_missing_requirements(imports, requirements_modules)
+
+        component_info = LangflowComponent(name=node_info.get("display_name", "Unknown"), id=node_data.get("id", "Unknown"),
+                                           credentials=credentials, requirements=missing_imports)
+
+        components.append(component_info)
+
+    return LangflowModelSpec(version=version, components=components)
+
|
|
1
|
+
LFX_DEPENDENCIES = [
|
2
|
+
"aiofile",
|
3
|
+
"aiofiles",
|
4
|
+
"annotated-types",
|
5
|
+
"anyio",
|
6
|
+
"asyncer",
|
7
|
+
"cachetools",
|
8
|
+
"caio",
|
9
|
+
"certifi",
|
10
|
+
"chardet",
|
11
|
+
"charset-normalizer",
|
12
|
+
"click",
|
13
|
+
"defusedxml",
|
14
|
+
"docstring_parser",
|
15
|
+
"emoji",
|
16
|
+
"fastapi",
|
17
|
+
"h11",
|
18
|
+
"h2",
|
19
|
+
"hpack",
|
20
|
+
"httpcore",
|
21
|
+
"httpx",
|
22
|
+
"hyperframe",
|
23
|
+
"idna",
|
24
|
+
"json_repair",
|
25
|
+
"jsonpatch",
|
26
|
+
"jsonpointer",
|
27
|
+
"langchain",
|
28
|
+
"langchain-core",
|
29
|
+
"langchain-text-splitters",
|
30
|
+
"langsmith",
|
31
|
+
"lfx-nightly",
|
32
|
+
"loguru",
|
33
|
+
"markdown-it-py",
|
34
|
+
"mdurl",
|
35
|
+
"nanoid",
|
36
|
+
"networkx",
|
37
|
+
"numpy",
|
38
|
+
"orjson",
|
39
|
+
"packaging",
|
40
|
+
"pandas",
|
41
|
+
"passlib",
|
42
|
+
"pillow",
|
43
|
+
"platformdirs",
|
44
|
+
"pydantic",
|
45
|
+
"pydantic-settings",
|
46
|
+
"pydantic_core",
|
47
|
+
"Pygments",
|
48
|
+
"python-dateutil",
|
49
|
+
"python-dotenv",
|
50
|
+
"pytz",
|
51
|
+
"PyYAML",
|
52
|
+
"requests",
|
53
|
+
"requests-toolbelt",
|
54
|
+
"rich",
|
55
|
+
"shellingham",
|
56
|
+
"six",
|
57
|
+
"sniffio",
|
58
|
+
"SQLAlchemy",
|
59
|
+
"starlette",
|
60
|
+
"structlog",
|
61
|
+
"tenacity",
|
62
|
+
"tomli",
|
63
|
+
"typer",
|
64
|
+
"typing-inspection",
|
65
|
+
"typing_extensions",
|
66
|
+
"tzdata",
|
67
|
+
"urllib3",
|
68
|
+
"uvicorn",
|
69
|
+
"validators",
|
70
|
+
"zstandard",
|
71
|
+
"langflow",
|
72
|
+
"langchain_openai",
|
73
|
+
"langchain_core",
|
74
|
+
"langchain_text_splitters",
|
75
|
+
"collections",
|
76
|
+
"typing",
|
77
|
+
"datetime",
|
78
|
+
"zoneinfo",
|
79
|
+
"or",
|
80
|
+
"re",
|
81
|
+
"os",
|
82
|
+
"copy",
|
83
|
+
"json"
|
84
|
+
]
|
ibm_watsonx_orchestrate/utils/async_helpers.py — new file

@@ -0,0 +1,31 @@
+import asyncio
+import threading
+from concurrent.futures import ThreadPoolExecutor
+from typing import Any, Coroutine, TypeVar, Optional
+
+T = TypeVar("T")
+
+
+def run_coroutine_sync(coroutine: Coroutine[Any, Any, T], timeout: Optional[float] = None) -> T:
+    def run_in_new_loop():
+        new_loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(new_loop)
+        try:
+            return new_loop.run_until_complete(coroutine)
+        finally:
+            new_loop.close()
+
+    try:
+        loop = asyncio.get_running_loop()
+    except RuntimeError:
+        return asyncio.run(coroutine)
+
+    if threading.current_thread() is threading.main_thread():
+        if not loop.is_running():
+            return loop.run_until_complete(coroutine)
+        else:
+            with ThreadPoolExecutor() as pool:
+                future = pool.submit(run_in_new_loop)
+                return future.result(timeout=timeout)
+    else:
+        return asyncio.run_coroutine_threadsafe(coroutine, loop).result()