ibm-watsonx-orchestrate 1.11.1__py3-none-any.whl → 1.12.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. ibm_watsonx_orchestrate/__init__.py +1 -1
  2. ibm_watsonx_orchestrate/agent_builder/agents/types.py +22 -5
  3. ibm_watsonx_orchestrate/agent_builder/connections/connections.py +3 -3
  4. ibm_watsonx_orchestrate/agent_builder/connections/types.py +14 -0
  5. ibm_watsonx_orchestrate/agent_builder/models/types.py +1 -0
  6. ibm_watsonx_orchestrate/agent_builder/toolkits/base_toolkit.py +1 -1
  7. ibm_watsonx_orchestrate/agent_builder/tools/__init__.py +1 -0
  8. ibm_watsonx_orchestrate/agent_builder/tools/base_tool.py +1 -1
  9. ibm_watsonx_orchestrate/agent_builder/tools/langflow_tool.py +184 -0
  10. ibm_watsonx_orchestrate/agent_builder/tools/openapi_tool.py +9 -3
  11. ibm_watsonx_orchestrate/agent_builder/tools/types.py +20 -2
  12. ibm_watsonx_orchestrate/cli/commands/agents/agents_controller.py +19 -6
  13. ibm_watsonx_orchestrate/cli/commands/connections/connections_command.py +18 -0
  14. ibm_watsonx_orchestrate/cli/commands/connections/connections_controller.py +114 -0
  15. ibm_watsonx_orchestrate/cli/commands/copilot/copilot_controller.py +2 -6
  16. ibm_watsonx_orchestrate/cli/commands/copilot/copilot_server_controller.py +24 -91
  17. ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_command.py +52 -2
  18. ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_controller.py +1 -1
  19. ibm_watsonx_orchestrate/cli/commands/models/model_provider_mapper.py +23 -4
  20. ibm_watsonx_orchestrate/cli/commands/models/models_controller.py +3 -3
  21. ibm_watsonx_orchestrate/cli/commands/partners/offering/partners_offering_command.py +56 -0
  22. ibm_watsonx_orchestrate/cli/commands/partners/offering/partners_offering_controller.py +475 -0
  23. ibm_watsonx_orchestrate/cli/commands/partners/offering/types.py +99 -0
  24. ibm_watsonx_orchestrate/cli/commands/partners/partners_command.py +12 -0
  25. ibm_watsonx_orchestrate/cli/commands/partners/partners_controller.py +0 -0
  26. ibm_watsonx_orchestrate/cli/commands/server/server_command.py +124 -637
  27. ibm_watsonx_orchestrate/cli/commands/toolkit/toolkit_command.py +2 -2
  28. ibm_watsonx_orchestrate/cli/commands/toolkit/toolkit_controller.py +2 -2
  29. ibm_watsonx_orchestrate/cli/commands/tools/tools_command.py +2 -3
  30. ibm_watsonx_orchestrate/cli/commands/tools/tools_controller.py +233 -44
  31. ibm_watsonx_orchestrate/cli/main.py +2 -0
  32. ibm_watsonx_orchestrate/client/connections/connections_client.py +4 -1
  33. ibm_watsonx_orchestrate/client/tools/tempus_client.py +3 -0
  34. ibm_watsonx_orchestrate/client/tools/tool_client.py +5 -2
  35. ibm_watsonx_orchestrate/client/utils.py +31 -1
  36. ibm_watsonx_orchestrate/docker/compose-lite.yml +58 -7
  37. ibm_watsonx_orchestrate/docker/default.env +20 -17
  38. ibm_watsonx_orchestrate/flow_builder/flows/decorators.py +10 -2
  39. ibm_watsonx_orchestrate/flow_builder/flows/flow.py +71 -9
  40. ibm_watsonx_orchestrate/flow_builder/node.py +14 -2
  41. ibm_watsonx_orchestrate/flow_builder/types.py +36 -3
  42. ibm_watsonx_orchestrate/langflow/__init__.py +0 -0
  43. ibm_watsonx_orchestrate/langflow/langflow_utils.py +195 -0
  44. ibm_watsonx_orchestrate/langflow/lfx_deps.py +84 -0
  45. ibm_watsonx_orchestrate/utils/docker_utils.py +280 -0
  46. ibm_watsonx_orchestrate/utils/environment.py +369 -0
  47. ibm_watsonx_orchestrate/utils/utils.py +7 -3
  48. {ibm_watsonx_orchestrate-1.11.1.dist-info → ibm_watsonx_orchestrate-1.12.0.dist-info}/METADATA +2 -2
  49. {ibm_watsonx_orchestrate-1.11.1.dist-info → ibm_watsonx_orchestrate-1.12.0.dist-info}/RECORD +52 -41
  50. {ibm_watsonx_orchestrate-1.11.1.dist-info → ibm_watsonx_orchestrate-1.12.0.dist-info}/WHEEL +0 -0
  51. {ibm_watsonx_orchestrate-1.11.1.dist-info → ibm_watsonx_orchestrate-1.12.0.dist-info}/entry_points.txt +0 -0
  52. {ibm_watsonx_orchestrate-1.11.1.dist-info → ibm_watsonx_orchestrate-1.12.0.dist-info}/licenses/LICENSE +0 -0
ibm_watsonx_orchestrate/__init__.py
@@ -6,7 +6,7 @@
 pkg_name = "ibm-watsonx-orchestrate"


-__version__ = "1.11.1"
+__version__ = "1.12.0"


 from ibm_watsonx_orchestrate.utils.logging.logger import setup_logging
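The only change at the package root is the version bump. A quick post-upgrade sanity check, assuming the new wheel is installed:

```python
import ibm_watsonx_orchestrate

# The package root exposes __version__, bumped to 1.12.0 in this release.
print(ibm_watsonx_orchestrate.__version__)  # expected: 1.12.0
```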
ibm_watsonx_orchestrate/agent_builder/agents/types.py
@@ -1,5 +1,6 @@
 import json
 import yaml
+import logging
 from enum import Enum
 from typing import List, Optional, Dict
 from pydantic import BaseModel, model_validator, ConfigDict
@@ -9,6 +10,7 @@ from ibm_watsonx_orchestrate.agent_builder.knowledge_bases.knowledge_base import
 from ibm_watsonx_orchestrate.agent_builder.agents.webchat_customizations import StarterPrompts, WelcomeContent
 from pydantic import Field, AliasChoices
 from typing import Annotated
+from ibm_watsonx_orchestrate.cli.commands.partners.offering.types import CATALOG_ONLY_FIELDS
 from ibm_watsonx_orchestrate.utils.exceptions import BadRequest

 from ibm_watsonx_orchestrate.agent_builder.tools.types import JsonSchemaObject
@@ -16,6 +18,8 @@ from ibm_watsonx_orchestrate.agent_builder.tools.types import JsonSchemaObject
 # TO-DO: this is just a placeholder. Will update this later to align with backend
 DEFAULT_LLM = "watsonx/meta-llama/llama-3-2-90b-vision-instruct"

+logger = logging.getLogger(__name__)
+
 # Handles yaml formatting for multiline strings to improve readability
 def str_presenter(dumper, data):
     if len(data.splitlines()) > 1: # check for multiline string
@@ -84,7 +88,7 @@ class BaseAgentSpec(BaseModel):
         dumped = self.model_dump(mode='json', exclude_unset=True, exclude_none=True)
         with open(file, 'w') as f:
             if file.endswith('.yaml') or file.endswith('.yml'):
-                yaml.dump(dumped, f, sort_keys=False)
+                yaml.dump(dumped, f, sort_keys=False, allow_unicode=True)
             elif file.endswith('.json'):
                 json.dump(dumped, f, indent=2)
             else:
@@ -93,6 +97,19 @@
     def dumps_spec(self) -> str:
         dumped = self.model_dump(mode='json', exclude_none=True)
         return json.dumps(dumped, indent=2)
+
+    @model_validator(mode="before")
+    def validate_agent_fields(cls,values):
+        return drop_catalog_fields(values)
+
+
+def drop_catalog_fields(values: dict):
+    for field in CATALOG_ONLY_FIELDS:
+        if values.get(field):
+            logger.warning(f"Field '{field}' is only used when publishing to the catalog, dropping this field for import")
+            del values[field]
+    return values
+

 # ===============================
 # NATIVE AGENT TYPES
@@ -202,8 +219,8 @@ def validate_agent_fields(values: dict) -> dict:
 # ===============================

 class ExternalAgentConfig(BaseModel):
-    hidden: bool = False
-    enable_cot: bool = False
+    hidden: Optional[bool] = False
+    enable_cot: Optional[bool] = False

 class ExternalAgentSpec(BaseAgentSpec):
     model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -226,8 +243,8 @@ class ExternalAgentSpec(BaseAgentSpec):
         # The get api responds with a flat object with no config
         if values.get("config") is None:
             values["config"] = {}
-        values["config"]["enable_cot"] = values.get("enable_cot", None)
-        values["config"]["hidden"] = values.get("hidden", None)
+        values["config"]["enable_cot"] = values.get("enable_cot", False)
+        values["config"]["hidden"] = values.get("hidden", False)
         return validate_external_agent_fields(values)

     @model_validator(mode="after")
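A rough sketch of what the new `drop_catalog_fields` before-validator means for imports; `CATALOG_ONLY_FIELDS` lives in `partners/offering/types.py` and its members are not shown in this diff, so the field name used below is purely hypothetical:

```python
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

CATALOG_ONLY_FIELDS = ["offering_pricing"]  # hypothetical placeholder value

def drop_catalog_fields(values: dict):
    # Mirrors the new helper: catalog-only keys are warned about and removed
    # before the agent spec is validated for import.
    for field in CATALOG_ONLY_FIELDS:
        if values.get(field):
            logger.warning(f"Field '{field}' is only used when publishing to the catalog, dropping this field for import")
            del values[field]
    return values

agent_values = {"name": "my_agent", "offering_pricing": "free"}
print(drop_catalog_fields(agent_values))  # {'name': 'my_agent'}
```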
ibm_watsonx_orchestrate/agent_builder/connections/connections.py
@@ -14,7 +14,7 @@ from ibm_watsonx_orchestrate.agent_builder.connections.types import (
     CONNECTION_TYPE_CREDENTIAL_MAPPING
 )

-from ibm_watsonx_orchestrate.utils.utils import sanatize_app_id
+from ibm_watsonx_orchestrate.utils.utils import sanitize_app_id
 from ibm_watsonx_orchestrate.utils.exceptions import BadRequest

 logger = logging.getLogger(__name__)
@@ -111,7 +111,7 @@ def _get_credentials_model(connection_type: ConnectionSecurityScheme, app_id: st
     return _build_credentials_model(credentials_type=credentials_type, vars=variables, base_prefix=base_prefix)

 def get_connection_type(app_id: str) -> ConnectionSecurityScheme:
-    sanitized_app_id = sanatize_app_id(app_id=app_id)
+    sanitized_app_id = sanitize_app_id(app_id=app_id)
     expected_schema_key = f"WXO_SECURITY_SCHEMA_{sanitized_app_id}"
     expected_schema = os.environ.get(expected_schema_key)

@@ -129,7 +129,7 @@ def get_connection_type(app_id: str) -> ConnectionSecurityScheme:
     return expected_schema

 def get_application_connection_credentials(type: ConnectionType, app_id: str) -> CREDENTIALS:
-    sanitized_app_id = sanatize_app_id(app_id=app_id)
+    sanitized_app_id = sanitize_app_id(app_id=app_id)
     expected_schema = get_connection_type(app_id=app_id)
     requested_schema = connection_type_security_schema_map.get(type)

ibm_watsonx_orchestrate/agent_builder/connections/types.py
@@ -1,7 +1,11 @@
+import logging
 from pydantic import BaseModel, Field, AliasChoices, model_validator
 from typing import Optional, Union, TypeVar, List
 from enum import Enum

+
+logger = logging.getLogger(__name__)
+
 class ConnectionKind(str, Enum):
     basic = 'basic'
     bearer = 'bearer'
@@ -145,6 +149,16 @@ class ConnectionConfiguration(BaseModel):

         super().__init__(*args, **kwargs)

+    @model_validator(mode="before")
+    def validate_auth_scheme(self):
+        if self.get('auth_type'):
+            try:
+                self['auth_type'] = ConnectionAuthType(self.get('auth_type'))
+            except:
+                logger.warning(f"Unsupported auth type '{self.get('auth_type')}', this will be removed from the configuration data")
+                self['auth_type'] = None
+        return self
+
     @model_validator(mode="after")
     def validate_config(self):
         conn_type = None
ibm_watsonx_orchestrate/agent_builder/models/types.py
@@ -23,6 +23,7 @@ class ModelProvider(str, Enum):
     STABILITY_AI = 'stability-ai'
     TOGETHER_AI = 'together-ai'
     WATSONX = 'watsonx'
+    X_AI='x-ai'

     def __str__(self):
         return self.value
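`ModelProvider` is a `str` enum whose `__str__` returns the raw value, so the new member can be used anywhere a provider string is accepted; a small check:

```python
from ibm_watsonx_orchestrate.agent_builder.models.types import ModelProvider

# 'x-ai' now resolves to a provider member instead of raising ValueError.
provider = ModelProvider("x-ai")
assert provider is ModelProvider.X_AI
assert str(provider) == "x-ai"
```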
ibm_watsonx_orchestrate/agent_builder/toolkits/base_toolkit.py
@@ -18,7 +18,7 @@ class BaseToolkit:
         dumped = self.__toolkit_spec__.model_dump(mode='json', exclude_unset=True, exclude_none=True, by_alias=True)
         with open(file, 'w') as f:
             if file.endswith('.yaml') or file.endswith('.yml'):
-                yaml.dump(dumped, f)
+                yaml.dump(dumped, f, allow_unicode=True)
             elif file.endswith('.json'):
                 json.dump(dumped, f, indent=2)
             else:
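The same `allow_unicode=True` flag is added to every YAML dump in this release (agent, tool, and toolkit specs, plus agent export). With PyYAML's default of `allow_unicode=False`, non-ASCII text in descriptions is written as escape sequences; the flag keeps it readable. A standalone PyYAML illustration:

```python
import yaml

spec = {"description": "Café-Bestellungen prüfen"}

# Default: non-ASCII characters are escaped, e.g. "Caf\xE9-Bestellungen pr\xFCfen"
print(yaml.dump(spec))

# With allow_unicode=True the text round-trips as readable UTF-8.
print(yaml.dump(spec, allow_unicode=True))
```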
ibm_watsonx_orchestrate/agent_builder/tools/__init__.py
@@ -1,4 +1,5 @@
 from .base_tool import BaseTool
 from .python_tool import tool, PythonTool, get_all_python_tools
 from .openapi_tool import create_openapi_json_tool, create_openapi_json_tool_from_uri, create_openapi_json_tools_from_uri, OpenAPITool, HTTPException
+from .langflow_tool import LangflowTool
 from .types import ToolPermission, JsonSchemaObject, ToolRequestBody, ToolResponseBody, OpenApiSecurityScheme, OpenApiToolBinding, PythonToolBinding, WxFlowsToolBinding, SkillToolBinding, ClientSideToolBinding, ToolBinding, ToolSpec
ibm_watsonx_orchestrate/agent_builder/tools/base_tool.py
@@ -19,7 +19,7 @@ class BaseTool:
         dumped = self.__tool_spec__.model_dump(mode='json', exclude_unset=True, exclude_none=True, by_alias=True)
         with open(file, 'w') as f:
             if file.endswith('.yaml') or file.endswith('.yml'):
-                yaml.dump(dumped, f)
+                yaml.dump(dumped, f, allow_unicode=True)
             elif file.endswith('.json'):
                 json.dump(dumped, f, indent=2)
             else:
ibm_watsonx_orchestrate/agent_builder/tools/langflow_tool.py (new file)
@@ -0,0 +1,184 @@
+import json
+import re
+
+from pydantic import BaseModel
+import rich
+
+from ibm_watsonx_orchestrate.agent_builder.connections.types import ConnectionSecurityScheme
+from ibm_watsonx_orchestrate.langflow.langflow_utils import parse_langflow_model
+from .base_tool import BaseTool
+from .types import LangflowToolBinding, ToolBinding, ToolPermission, ToolRequestBody, ToolResponseBody, ToolSpec
+from ibm_watsonx_orchestrate.utils.exceptions import BadRequest
+
+MIN_LANGFLOW_VERSION = [1,5,0]
+LANGFLOW_CHAT_INPUT_LABEL = "ChatInput"
+LANGFLOW_CHAT_OUTPUT_LABEL = "ChatOutput"
+VALID_NAME_PATTERN = re.compile("^[a-zA-Z](\\w|_)+$")
+
+class LangflowTool(BaseTool):
+    # additional python module requirements for langflow based tools
+    requirements: list[str] = []
+
+    def __init__(self,spec: ToolSpec):
+        BaseTool.__init__(self,spec=spec)
+
+        if self.__tool_spec__.binding.langflow is None:
+            raise BadRequest('Missing langflow binding')
+
+
+    def __repr__(self):
+        return f"LangflowTool(name='{self.__tool_spec__.name}', description='{self.__tool_spec__.description}')"
+
+
+    def __str__(self):
+        return self.__repr__()
+
+def validate_langflow_version(version_string: str) -> bool:
+    version_nums = map(int, re.findall(r"\d+",version_string))
+    for i,n in enumerate(version_nums):
+        if i >= len(MIN_LANGFLOW_VERSION) or MIN_LANGFLOW_VERSION[i] < n:
+            break
+        if MIN_LANGFLOW_VERSION[i] > n:
+            return False
+    return True
+
+
+def extract_langflow_nodes(tool_definition: dict, node_type: str) -> dict:
+    return [n for n in tool_definition.get('data',{}).get('nodes',{}) if n.get('data',{}).get('type') == node_type]
+
+def langflow_input_schema(tool_definition: dict = None) -> ToolRequestBody:
+
+    chat_input_nodes = extract_langflow_nodes(tool_definition=tool_definition,node_type=LANGFLOW_CHAT_INPUT_LABEL)
+
+    if len(chat_input_nodes) < 1:
+        raise ValueError(f"No '{LANGFLOW_CHAT_INPUT_LABEL}' node found in langflow tool")
+    if len(chat_input_nodes) > 1:
+        raise ValueError(f"Too many '{LANGFLOW_CHAT_INPUT_LABEL}' nodes found in langlow tool")
+
+    input_description = chat_input_nodes[0].get("data",{}).get("node",{}).get("description","")
+
+    return ToolRequestBody(
+        type= "object",
+        properties= {
+            "input": {
+                "description": input_description,
+                "type": "string"
+            }
+        },
+        required= ["input"]
+    )
+
+def langflow_output_schema(tool_definition: dict = None):
+
+    chat_output_nodes = extract_langflow_nodes(tool_definition=tool_definition,node_type=LANGFLOW_CHAT_OUTPUT_LABEL)
+
+    if len(chat_output_nodes) < 1:
+        raise ValueError(f"No '{LANGFLOW_CHAT_OUTPUT_LABEL}' node found in langflow tool")
+    if len(chat_output_nodes) > 1:
+        output_description = ""
+    else:
+        output_description = chat_output_nodes[0].get("data",{}).get("node",{}).get("description","")
+
+    return ToolResponseBody(
+        description=output_description,
+        type= "string"
+    )
+
+def create_langflow_tool(
+    tool_definition: dict,
+    connections: dict = None,
+) -> LangflowTool:
+
+    name = tool_definition.get('name')
+    if not name:
+        raise ValueError('Provided tool definition does not have a name')
+
+    if VALID_NAME_PATTERN.match(name) is None:
+        raise ValueError(f"Langflow tool name contains unsupported characters. Only alphanumeric characters and underscores are allowed, and must not start with a number or underscore.")
+
+    description = tool_definition.get('description')
+    if not description:
+        raise ValueError('Provided tool definition does not have a description')
+
+    langflow_id = tool_definition.get('id')
+
+    langflow_version = tool_definition.get('last_tested_version')
+    if not langflow_version:
+        raise ValueError('No langflow version detected in tool definition')
+    if not validate_langflow_version(langflow_version):
+        raise ValueError(f"Langflow version is below minimum requirements, found '{langflow_version}', miniumum required version '{'.'.join(map(str,MIN_LANGFLOW_VERSION))}'")
+
+    # find all the component in Langflow and display its credential
+    langflow_spec = parse_langflow_model(tool_definition)
+    if langflow_spec:
+        rich.print(f"[bold white]Langflow version used: {langflow_version}[/bold white]")
+        rich.print("Please ensure this flow is compatible with the Langflow version bundled in ADK.")
+        rich.print("\nLangflow components:")
+
+        table = rich.table.Table(show_header=True, header_style="bold white", show_lines=True)
+        column_args = {
+            "ID": {},
+            "Name": {},
+            "Credentials": {},
+            "Requirements": {}
+        }
+        for column in column_args:
+            table.add_column(column,**column_args[column])
+
+        requirements = set()
+        api_key_not_set = False
+        for component in langflow_spec.components:
+            if component.credentials and len(component.credentials) > 0:
+                # create a command separated list with newline
+                component_creds = None
+                for k, v in component.credentials.items():
+                    if v is None or v == "":
+                        v = 'NOT SET'
+                        api_key_not_set = True
+                    if component_creds is None:
+                        component_creds = f"{k} {v}"
+                    else:
+                        component_creds += "\n" + f"{k} {v}"
+            else:
+                component_creds = "N/A"
+
+            if component.requirements and len(component.requirements) > 0:
+                # create a command separated list with newline
+                component_req = "\n".join([f"{k}" for k in component.requirements])
+                for r in component.requirements:
+                    requirements.add(r)
+            else:
+                component_req = "N/A"
+            table.add_row(component.id,component.name,component_creds,component_req)
+        rich.print(table)
+
+        rich.print("[bold yellow]Tip:[/bold yellow] Langflow tool might require additional python modules. Identified requirements will be added.")
+        rich.print("[bold yellow]Tip:[/bold yellow] Avoid hardcoding sensitive values. Use Orchestrate connections to manage secrets securely.")
+        if api_key_not_set:
+            rich.print("[bold yellow]Warning:[/bold yellow] Some required api key(s) were not set in the flow. Please adjust the flow to include them.")
+            rich.print("Ensure each credential follows the <app-id>_<variable> naming convention within the Langflow model.")
+
+            for connection in connections:
+                rich.print(f"* Connection: {connection} → Suggested naming: {connection}_<variable>")
+
+    spec = ToolSpec(
+        name=name,
+        description=description,
+        permission=ToolPermission('read_only')
+    )
+
+    spec.input_schema = langflow_input_schema(tool_definition=tool_definition)
+
+    spec.output_schema = langflow_output_schema(tool_definition=tool_definition)
+
+    spec.binding = ToolBinding(
+        langflow=LangflowToolBinding(
+            langflow_id=langflow_id,
+            langflow_version=langflow_version,
+            connections=connections
+        )
+    )
+
+    tool = LangflowTool(spec=spec)
+    tool.requirements = requirements
+    return tool
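A minimal sketch of driving the new builder directly from an exported Langflow flow; in the CLI this path is wired up through `tools_controller.py`, and the connection mapping shown here is a hypothetical app-id pairing:

```python
import json
from ibm_watsonx_orchestrate.agent_builder.tools.langflow_tool import create_langflow_tool

# The flow JSON must carry a name, a description, last_tested_version >= 1.5.0,
# exactly one ChatInput node and at least one ChatOutput node.
with open("my_flow.json") as f:             # placeholder path to a Langflow export
    flow_definition = json.load(f)

tool = create_langflow_tool(
    tool_definition=flow_definition,
    connections={"openai_creds": "openai_creds"},  # hypothetical connection mapping
)

print(tool)                # LangflowTool(name='...', description='...')
print(tool.requirements)   # extra python modules detected in the flow's components
```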
ibm_watsonx_orchestrate/agent_builder/tools/openapi_tool.py
@@ -56,9 +56,9 @@ class OpenAPITool(BaseTool):
                 spec = ToolSpec.model_validate(json.load(f))
             else:
                 raise BadRequest('file must end in .json, .yaml, or .yml')
-
-        if spec.binding.openapi is None or spec.binding.openapi is None:
-            raise BadRequest('failed to load python tool as the tool had no openapi binding')
+
+        if spec is None or spec.binding is None or spec.binding.openapi is None:
+            raise BadRequest('failed to load openapi tool as the tool had no openapi binding')

         return OpenAPITool(spec=spec)

@@ -80,6 +80,7 @@ def create_openapi_json_tool(
     http_success_response_code: int = 200,
     http_response_content_type='application/json',
     name: str = None,
+    display_name: str = None,
     description: str = None,
     permission: ToolPermission = None,
     input_schema: ToolRequestBody = None,
@@ -130,8 +131,10 @@
         raise BadRequest(
             f"No description provided for tool. {http_method}: {http_path} did not specify a description field, and no description was provided")

+    spec_display_name = display_name if display_name else route_spec.get('summary')
     spec = ToolSpec(
         name=spec_name,
+        display_name=spec_display_name,
         description=spec_description,
         permission=spec_permission
     )
@@ -348,6 +351,7 @@
     http_response_content_type='application/json',
     permission: ToolPermission = ToolPermission.READ_ONLY,
     name: str = None,
+    display_name: str = None,
     description: str = None,
     input_schema: ToolRequestBody = None,
     output_schema: ToolResponseBody = None,
@@ -362,6 +366,7 @@
     :param http_success_response_code: Which http status code should be considered a successful call (defaults to 200)
     :param http_response_content_type: Which http response type should be considered successful (default to application/json)
     :param name: The name of the resulting tool (used to invoke the tool by the agent)
+    :param display_name: The name of the resulting tool to be displayed
     :param description: The description of the resulting tool (used as the semantic layer to help the agent with tool selection)
     :param permission: Which orchestrate permission level does a user need to have to invoke this tool
     :param input_schema: The JSONSchema of the inputs to the http request
@@ -379,6 +384,7 @@
         http_response_content_type=http_response_content_type,
         permission=permission,
         name=name,
+        display_name=display_name,
         description=description,
         input_schema=input_schema,
         output_schema=output_schema,
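A hedged sketch of the new `display_name` pass-through on the async URI helper; only the keyword arguments visible in these hunks are certain, while the positional spec location and the `http_path`/`http_method` parameter names are assumptions based on the error message shown above:

```python
import asyncio
from ibm_watsonx_orchestrate.agent_builder.tools import create_openapi_json_tool_from_uri
from ibm_watsonx_orchestrate.agent_builder.tools.types import ToolPermission

async def build_tool():
    # display_name (new in 1.12.0) falls back to the route's summary when omitted.
    return await create_openapi_json_tool_from_uri(
        "https://example.com/openapi.json",    # assumed positional spec location
        http_path="/pets/{petId}",             # assumed parameter name
        http_method="GET",                     # assumed parameter name
        name="get_pet",
        display_name="Get pet by ID",
        description="Fetch a single pet record by its identifier",
        permission=ToolPermission.READ_ONLY,
    )

tool = asyncio.run(build_tool())
```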
ibm_watsonx_orchestrate/agent_builder/tools/types.py
@@ -1,8 +1,9 @@
 from enum import Enum
 from typing import List, Any, Dict, Literal, Optional, Union

-from pydantic import BaseModel, model_validator, ConfigDict, Field, AliasChoices
+from pydantic import BaseModel, model_validator, ConfigDict, Field, AliasChoices, ValidationError
 from ibm_watsonx_orchestrate.utils.exceptions import BadRequest
+from ibm_watsonx_orchestrate.agent_builder.connections import KeyValueConnectionCredentials


 class ToolPermission(str, Enum):
@@ -159,6 +160,21 @@ class FlowToolBinding(BaseModel):
     flow_id: str
     model: Optional[dict] = None

+class LangflowToolBinding(BaseModel):
+    langflow_id: Optional[str] = None
+    project_id: Optional[str] = None
+    langflow_version: str
+    connections: Optional[dict] = None
+
+    @model_validator(mode='after')
+    def validate_connection_type(self) -> 'LangflowToolBinding':
+        if self.connections:
+            for k,v in self.connections.items():
+                if not v:
+                    raise ValidationError(f"No connection provided for '{k}'")
+        return self
+
+
 class ToolBinding(BaseModel):
     openapi: OpenApiToolBinding = None
     python: PythonToolBinding = None
@@ -167,6 +183,7 @@
     client_side: ClientSideToolBinding = None
     mcp: McpToolBinding = None
     flow: FlowToolBinding = None
+    langflow: LangflowToolBinding = None

     @model_validator(mode='after')
     def validate_binding_type(self) -> 'ToolBinding':
@@ -177,7 +194,8 @@
             self.skill is not None,
             self.client_side is not None,
             self.mcp is not None,
-            self.flow is not None
+            self.flow is not None,
+            self.langflow is not None
         ]
         if sum(bindings) == 0:
             raise BadRequest("One binding must be set")
ibm_watsonx_orchestrate/cli/commands/agents/agents_controller.py
@@ -1309,7 +1309,7 @@ class AgentsController:
         return AssistantAgent.model_validate(assistant_result)


-    def export_agent(self, name: str, kind: AgentKind, output_path: str, agent_only_flag: bool=False, zip_file_out: zipfile.ZipFile | None = None) -> None:
+    def export_agent(self, name: str, kind: AgentKind, output_path: str, agent_only_flag: bool=False, zip_file_out: zipfile.ZipFile | None = None, with_tool_spec_file: bool = False) -> None:
         output_file = Path(output_path)
         output_file_extension = output_file.suffix
         output_file_name = output_file.stem
@@ -1330,7 +1330,7 @@
         if agent_only_flag:
             logger.info(f"Exported agent definition for '{name}' to '{output_path}'")
             with open(output_path, 'w') as outfile:
-                yaml.dump(agent_spec_file_content, outfile, sort_keys=False, default_flow_style=False)
+                yaml.dump(agent_spec_file_content, outfile, sort_keys=False, default_flow_style=False, allow_unicode=True)
             return

         close_file_flag = False
@@ -1340,7 +1340,7 @@

         logger.info(f"Exporting agent definition for '{name}'")

-        agent_spec_yaml = yaml.dump(agent_spec_file_content, sort_keys=False, default_flow_style=False)
+        agent_spec_yaml = yaml.dump(agent_spec_file_content, sort_keys=False, default_flow_style=False, allow_unicode=True)
         agent_spec_yaml_bytes = agent_spec_yaml.encode("utf-8")
         agent_spec_yaml_file = io.BytesIO(agent_spec_yaml_bytes)

@@ -1357,15 +1357,22 @@
             agent_spec_yaml_file.getvalue()
         )

-        tools_contoller = ToolsController()
-        for tool_name in agent_spec_file_content.get("tools", []):
+        agent_tools = agent_spec_file_content.get("tools", [])
+
+        tools_controller = ToolsController()
+        tools_client = tools_controller.get_client()
+        tool_specs = None
+        if with_tool_spec_file:
+            tool_specs = {t.get('name'):t for t in tools_client.get_drafts_by_names(agent_tools) if t.get('name')}
+
+        for tool_name in agent_tools:

             base_tool_file_path = f"{output_file_name}/tools/{tool_name}/"
             if check_file_in_zip(file_path=base_tool_file_path, zip_file=zip_file_out):
                 continue

             logger.info(f"Exporting tool '{tool_name}'")
-            tool_artifact_bytes = tools_contoller.download_tool(tool_name)
+            tool_artifact_bytes = tools_controller.download_tool(tool_name)
             if not tool_artifact_bytes:
                 continue

@@ -1377,6 +1384,12 @@
                     f"{base_tool_file_path}{item.filename}",
                     buffer
                 )
+            if with_tool_spec_file and tool_specs:
+                current_spec = tool_specs[tool_name]
+                zip_file_out.writestr(
+                    f"{base_tool_file_path}config.json",
+                    ToolSpec.model_validate(current_spec).model_dump_json(exclude_unset=True,indent=2)
+                )

         for kb_name in agent_spec_file_content.get("knowledge_base", []):
             logger.warning(f"Skipping {kb_name}, knowledge_bases are currently unsupported by export")
ibm_watsonx_orchestrate/cli/commands/connections/connections_command.py
@@ -6,6 +6,7 @@ from ibm_watsonx_orchestrate.cli.commands.connections.connections_controller imp
     remove_connection,
     list_connections,
     import_connection,
+    export_connection,
     configure_connection,
     set_credentials_connection,
     set_identity_provider_connection,
@@ -65,6 +66,23 @@ def import_connection_command(
 ):
     import_connection(file=file)

+@connections_app.command(name="export")
+def export_connection_command(
+    app_id: Annotated[
+        str, typer.Option(
+            '--app-id', '-a',
+            help='The app id of the connection you wish to export.'
+        )
+    ],
+    output_file: Annotated[
+        str, typer.Option(
+            '--output', '-o',
+            help='Path to where the exported connection should be saved.'
+        )
+    ] = None
+):
+    export_connection(app_id=app_id,output_file=output_file)
+
 @connections_app.command(name="configure")
 def configure_connection_command(
     app_id: Annotated[
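The new subcommand is a thin wrapper over the controller's `export_connection`; the CLI equivalent would be along the lines of `orchestrate connections export --app-id my_app -o my_app_connection.yaml` (assuming the usual `orchestrate` entry point), and the same call can be made from Python:

```python
from ibm_watsonx_orchestrate.cli.commands.connections.connections_controller import export_connection

# Writes the configuration of the 'my_app' connection to the given file;
# both values here are placeholders.
export_connection(app_id="my_app", output_file="my_app_connection.yaml")
```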