ibm-watsonx-orchestrate 1.12.0b0__py3-none-any.whl → 1.12.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ibm_watsonx_orchestrate/__init__.py +2 -1
- ibm_watsonx_orchestrate/agent_builder/agents/types.py +5 -5
- ibm_watsonx_orchestrate/agent_builder/models/types.py +1 -0
- ibm_watsonx_orchestrate/agent_builder/toolkits/base_toolkit.py +1 -1
- ibm_watsonx_orchestrate/agent_builder/tools/base_tool.py +1 -1
- ibm_watsonx_orchestrate/agent_builder/tools/langflow_tool.py +61 -1
- ibm_watsonx_orchestrate/agent_builder/tools/openapi_tool.py +6 -0
- ibm_watsonx_orchestrate/cli/commands/agents/agents_controller.py +2 -2
- ibm_watsonx_orchestrate/cli/commands/connections/connections_controller.py +2 -2
- ibm_watsonx_orchestrate/cli/commands/environment/environment_command.py +5 -1
- ibm_watsonx_orchestrate/cli/commands/environment/environment_controller.py +6 -3
- ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_command.py +3 -2
- ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_controller.py +1 -1
- ibm_watsonx_orchestrate/cli/commands/models/model_provider_mapper.py +23 -4
- ibm_watsonx_orchestrate/cli/commands/partners/offering/partners_offering_controller.py +21 -4
- ibm_watsonx_orchestrate/cli/commands/partners/offering/types.py +7 -15
- ibm_watsonx_orchestrate/cli/commands/partners/partners_command.py +1 -1
- ibm_watsonx_orchestrate/cli/commands/server/server_command.py +11 -3
- ibm_watsonx_orchestrate/cli/commands/toolkit/toolkit_command.py +2 -2
- ibm_watsonx_orchestrate/cli/commands/tools/tools_controller.py +37 -8
- ibm_watsonx_orchestrate/cli/config.py +3 -1
- ibm_watsonx_orchestrate/docker/compose-lite.yml +56 -6
- ibm_watsonx_orchestrate/docker/default.env +19 -16
- ibm_watsonx_orchestrate/flow_builder/flows/decorators.py +10 -2
- ibm_watsonx_orchestrate/flow_builder/flows/flow.py +71 -9
- ibm_watsonx_orchestrate/flow_builder/node.py +14 -2
- ibm_watsonx_orchestrate/flow_builder/types.py +36 -3
- ibm_watsonx_orchestrate/langflow/__init__.py +0 -0
- ibm_watsonx_orchestrate/langflow/langflow_utils.py +195 -0
- ibm_watsonx_orchestrate/langflow/lfx_deps.py +84 -0
- ibm_watsonx_orchestrate/utils/utils.py +6 -2
- {ibm_watsonx_orchestrate-1.12.0b0.dist-info → ibm_watsonx_orchestrate-1.12.1.dist-info}/METADATA +2 -2
- {ibm_watsonx_orchestrate-1.12.0b0.dist-info → ibm_watsonx_orchestrate-1.12.1.dist-info}/RECORD +36 -33
- {ibm_watsonx_orchestrate-1.12.0b0.dist-info → ibm_watsonx_orchestrate-1.12.1.dist-info}/WHEEL +0 -0
- {ibm_watsonx_orchestrate-1.12.0b0.dist-info → ibm_watsonx_orchestrate-1.12.1.dist-info}/entry_points.txt +0 -0
- {ibm_watsonx_orchestrate-1.12.0b0.dist-info → ibm_watsonx_orchestrate-1.12.1.dist-info}/licenses/LICENSE +0 -0
@@ -88,7 +88,7 @@ class BaseAgentSpec(BaseModel):
         dumped = self.model_dump(mode='json', exclude_unset=True, exclude_none=True)
         with open(file, 'w') as f:
             if file.endswith('.yaml') or file.endswith('.yml'):
-                yaml.dump(dumped, f, sort_keys=False)
+                yaml.dump(dumped, f, sort_keys=False, allow_unicode=True)
             elif file.endswith('.json'):
                 json.dump(dumped, f, indent=2)
             else:
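Several hunks in this release add `allow_unicode=True` to `yaml.dump` calls. For context, this is standard PyYAML behavior, shown here with a made-up payload rather than anything from the package:

```python
import yaml

spec = {"description": "Café agent"}

# Default PyYAML behaviour: non-ASCII characters are escaped.
print(yaml.dump(spec, sort_keys=False), end="")
# description: "Caf\xE9 agent"

# With allow_unicode=True the exported YAML keeps the characters readable.
print(yaml.dump(spec, sort_keys=False, allow_unicode=True), end="")
# description: Café agent
```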
@@ -219,8 +219,8 @@ def validate_agent_fields(values: dict) -> dict:
 # ===============================
 
 class ExternalAgentConfig(BaseModel):
-    hidden: bool = False
-    enable_cot: bool = False
+    hidden: Optional[bool] = False
+    enable_cot: Optional[bool] = False
 
 class ExternalAgentSpec(BaseAgentSpec):
     model_config = ConfigDict(arbitrary_types_allowed=True)

@@ -243,8 +243,8 @@ class ExternalAgentSpec(BaseAgentSpec):
         # The get api responds with a flat object with no config
         if values.get("config") is None:
             values["config"] = {}
-            values["config"]["enable_cot"] = values.get("enable_cot",
-            values["config"]["hidden"] = values.get("hidden",
+            values["config"]["enable_cot"] = values.get("enable_cot", False)
+            values["config"]["hidden"] = values.get("hidden", False)
         return validate_external_agent_fields(values)
 
     @model_validator(mode="after")
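For context on the `Optional[bool]` change above: in Pydantic v2 a plain `bool` field rejects an explicit `null`, while `Optional[bool]` accepts it and still keeps the `False` default when the field is omitted. A minimal sketch with illustrative class names (not from the package):

```python
from typing import Optional
from pydantic import BaseModel, ValidationError

class StrictConfig(BaseModel):
    hidden: bool = False

class RelaxedConfig(BaseModel):
    hidden: Optional[bool] = False

print(RelaxedConfig.model_validate({"hidden": None}))  # hidden=None is accepted
print(RelaxedConfig.model_validate({}))                # hidden=False default still applies
try:
    StrictConfig.model_validate({"hidden": None})      # a plain bool field rejects null
except ValidationError as err:
    print(err.errors()[0]["type"])                     # bool_type
```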
@@ -18,7 +18,7 @@ class BaseToolkit:
         dumped = self.__toolkit_spec__.model_dump(mode='json', exclude_unset=True, exclude_none=True, by_alias=True)
         with open(file, 'w') as f:
             if file.endswith('.yaml') or file.endswith('.yml'):
-                yaml.dump(dumped, f)
+                yaml.dump(dumped, f, allow_unicode=True)
             elif file.endswith('.json'):
                 json.dump(dumped, f, indent=2)
             else:

@@ -19,7 +19,7 @@ class BaseTool:
         dumped = self.__tool_spec__.model_dump(mode='json', exclude_unset=True, exclude_none=True, by_alias=True)
         with open(file, 'w') as f:
             if file.endswith('.yaml') or file.endswith('.yml'):
-                yaml.dump(dumped, f)
+                yaml.dump(dumped, f, allow_unicode=True)
             elif file.endswith('.json'):
                 json.dump(dumped, f, indent=2)
             else:

@@ -2,8 +2,10 @@ import json
 import re
 
 from pydantic import BaseModel
+import rich
 
 from ibm_watsonx_orchestrate.agent_builder.connections.types import ConnectionSecurityScheme
+from ibm_watsonx_orchestrate.langflow.langflow_utils import parse_langflow_model
 from .base_tool import BaseTool
 from .types import LangflowToolBinding, ToolBinding, ToolPermission, ToolRequestBody, ToolResponseBody, ToolSpec
 from ibm_watsonx_orchestrate.utils.exceptions import BadRequest

@@ -14,6 +16,9 @@ LANGFLOW_CHAT_OUTPUT_LABEL = "ChatOutput"
 VALID_NAME_PATTERN = re.compile("^[a-zA-Z](\\w|_)+$")
 
 class LangflowTool(BaseTool):
+    # additional python module requirements for langflow based tools
+    requirements: list[str] = []
+
     def __init__(self,spec: ToolSpec):
         BaseTool.__init__(self,spec=spec)
 
@@ -103,6 +108,59 @@ def create_langflow_tool(
     if not validate_langflow_version(langflow_version):
         raise ValueError(f"Langflow version is below minimum requirements, found '{langflow_version}', miniumum required version '{'.'.join(map(str,MIN_LANGFLOW_VERSION))}'")
 
+    # find all the component in Langflow and display its credential
+    langflow_spec = parse_langflow_model(tool_definition)
+    if langflow_spec:
+        rich.print(f"[bold white]Langflow version used: {langflow_version}[/bold white]")
+        rich.print("Please ensure this flow is compatible with the Langflow version bundled in ADK.")
+        rich.print("\nLangflow components:")
+
+        table = rich.table.Table(show_header=True, header_style="bold white", show_lines=True)
+        column_args = {
+            "ID": {},
+            "Name": {},
+            "Credentials": {},
+            "Requirements": {}
+        }
+        for column in column_args:
+            table.add_column(column,**column_args[column])
+
+        requirements = set()
+        api_key_not_set = False
+        for component in langflow_spec.components:
+            if component.credentials and len(component.credentials) > 0:
+                # create a command separated list with newline
+                component_creds = None
+                for k, v in component.credentials.items():
+                    if v is None or v == "":
+                        v = 'NOT SET'
+                        api_key_not_set = True
+                    if component_creds is None:
+                        component_creds = f"{k} {v}"
+                    else:
+                        component_creds += "\n" + f"{k} {v}"
+            else:
+                component_creds = "N/A"
+
+            if component.requirements and len(component.requirements) > 0:
+                # create a command separated list with newline
+                component_req = "\n".join([f"{k}" for k in component.requirements])
+                for r in component.requirements:
+                    requirements.add(r)
+            else:
+                component_req = "N/A"
+            table.add_row(component.id,component.name,component_creds,component_req)
+        rich.print(table)
+
+        rich.print("[bold yellow]Tip:[/bold yellow] Langflow tool might require additional python modules. Identified requirements will be added.")
+        rich.print("[bold yellow]Tip:[/bold yellow] Avoid hardcoding sensitive values. Use Orchestrate connections to manage secrets securely.")
+        if api_key_not_set:
+            rich.print("[bold yellow]Warning:[/bold yellow] Some required api key(s) were not set in the flow. Please adjust the flow to include them.")
+            rich.print("Ensure each credential follows the <app-id>_<variable> naming convention within the Langflow model.")
+
+            for connection in connections:
+                rich.print(f"* Connection: {connection} → Suggested naming: {connection}_<variable>")
+
     spec = ToolSpec(
         name=name,
         description=description,

@@ -121,4 +179,6 @@ def create_langflow_tool(
         )
     )
 
-
+    tool = LangflowTool(spec=spec)
+    tool.requirements = requirements
+    return tool
@@ -80,6 +80,7 @@ def create_openapi_json_tool(
         http_success_response_code: int = 200,
         http_response_content_type='application/json',
         name: str = None,
+        display_name: str = None,
         description: str = None,
         permission: ToolPermission = None,
         input_schema: ToolRequestBody = None,

@@ -130,8 +131,10 @@ def create_openapi_json_tool(
         raise BadRequest(
             f"No description provided for tool. {http_method}: {http_path} did not specify a description field, and no description was provided")
 
+    spec_display_name = display_name if display_name else route_spec.get('summary')
     spec = ToolSpec(
         name=spec_name,
+        display_name=spec_display_name,
         description=spec_description,
         permission=spec_permission
     )

@@ -348,6 +351,7 @@ async def create_openapi_json_tool_from_uri(
         http_response_content_type='application/json',
         permission: ToolPermission = ToolPermission.READ_ONLY,
         name: str = None,
+        display_name: str = None,
         description: str = None,
         input_schema: ToolRequestBody = None,
         output_schema: ToolResponseBody = None,

@@ -362,6 +366,7 @@ async def create_openapi_json_tool_from_uri(
     :param http_success_response_code: Which http status code should be considered a successful call (defaults to 200)
     :param http_response_content_type: Which http response type should be considered successful (default to application/json)
     :param name: The name of the resulting tool (used to invoke the tool by the agent)
+    :param display_name: The name of the resulting tool to be displayed
     :param description: The description of the resulting tool (used as the semantic layer to help the agent with tool selection)
     :param permission: Which orchestrate permission level does a user need to have to invoke this tool
     :param input_schema: The JSONSchema of the inputs to the http request

@@ -379,6 +384,7 @@ async def create_openapi_json_tool_from_uri(
         http_response_content_type=http_response_content_type,
         permission=permission,
         name=name,
+        display_name=display_name,
         description=description,
         input_schema=input_schema,
         output_schema=output_schema,
@@ -1330,7 +1330,7 @@ class AgentsController:
         if agent_only_flag:
             logger.info(f"Exported agent definition for '{name}' to '{output_path}'")
             with open(output_path, 'w') as outfile:
-                yaml.dump(agent_spec_file_content, outfile, sort_keys=False, default_flow_style=False)
+                yaml.dump(agent_spec_file_content, outfile, sort_keys=False, default_flow_style=False, allow_unicode=True)
             return
 
         close_file_flag = False

@@ -1340,7 +1340,7 @@ class AgentsController:
 
         logger.info(f"Exporting agent definition for '{name}'")
 
-        agent_spec_yaml = yaml.dump(agent_spec_file_content, sort_keys=False, default_flow_style=False)
+        agent_spec_yaml = yaml.dump(agent_spec_file_content, sort_keys=False, default_flow_style=False, allow_unicode=True)
         agent_spec_yaml_bytes = agent_spec_yaml.encode("utf-8")
         agent_spec_yaml_file = io.BytesIO(agent_spec_yaml_bytes)
 

@@ -575,7 +575,7 @@ def export_connection(output_file: str, app_id: str | None = None, connection_id
         case '.zip':
             zip_file = zipfile.ZipFile(output_path, "w")
 
-            connection_yaml = yaml.dump(combined_connections, sort_keys=False, default_flow_style=False)
+            connection_yaml = yaml.dump(combined_connections, sort_keys=False, default_flow_style=False, allow_unicode=True)
             connection_yaml_bytes = connection_yaml.encode("utf-8")
             connection_yaml_file = io.BytesIO(connection_yaml_bytes)
 

@@ -588,7 +588,7 @@ def export_connection(output_file: str, app_id: str | None = None, connection_id
         case '.yaml' | '.yml':
             with open(output_path,'w') as yaml_file:
                 yaml_file.write(
-                    yaml.dump(combined_connections, sort_keys=False, default_flow_style=False)
+                    yaml.dump(combined_connections, sort_keys=False, default_flow_style=False, allow_unicode=True)
                 )
 
     logger.info(f"Successfully exported connection file for {app_id}")
@@ -43,9 +43,13 @@ def activate_env(
     test_package_version_override: Annotated[
         str,
         typer.Option("--test-package-version-override", help="Which prereleased package version to reference when using --registry testpypi", hidden=True),
+    ] = None,
+    skip_version_check: Annotated[
+        bool,
+        typer.Option('--skip-version-check/--enable-version-check', help='Use this flag to skip validating that adk version in use exists in pypi (for clients who mirror the ADK to a local registry and do not have local access to pypi).')
     ] = None
 ):
-    environment_controller.activate(name=name, apikey=apikey,username=username, password=password, registry=registry, test_package_version_override=test_package_version_override)
+    environment_controller.activate(name=name, apikey=apikey, username=username, password=password, registry=registry, test_package_version_override=test_package_version_override, skip_version_check=skip_version_check)
 
 
 @environment_app.command(name="add")
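The new option uses Typer's paired on/off flag syntax, so the stored setting is only changed when the user passes one of the two flags. A minimal standalone sketch of that behaviour (command and parameter names here are illustrative, not the ADK's entry point):

```python
from typing import Optional
import typer

app = typer.Typer()

@app.command()
def activate(
    skip_version_check: Optional[bool] = typer.Option(
        None,
        "--skip-version-check/--enable-version-check",
        help="Paired on/off flags; None means neither flag was passed.",
    ),
):
    # None -> leave the persisted config untouched; True/False -> write the choice.
    print(skip_version_check)

# $ activate                         -> None
# $ activate --skip-version-check    -> True
# $ activate --enable-version-check  -> False
```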
@@ -18,8 +18,9 @@ from ibm_watsonx_orchestrate.cli.config import (
     ENV_IAM_URL_OPT,
     ENVIRONMENTS_SECTION_HEADER,
     PROTECTED_ENV_NAME,
-    ENV_AUTH_TYPE, PYTHON_REGISTRY_HEADER, PYTHON_REGISTRY_TYPE_OPT, PYTHON_REGISTRY_TEST_PACKAGE_VERSION_OVERRIDE_OPT,
-
+    ENV_AUTH_TYPE, PYTHON_REGISTRY_HEADER, PYTHON_REGISTRY_TYPE_OPT, PYTHON_REGISTRY_TEST_PACKAGE_VERSION_OVERRIDE_OPT,
+    BYPASS_SSL, VERIFY,
+    DEFAULT_CONFIG_FILE_CONTENT, PYTHON_REGISTRY_SKIP_VERSION_CHECK_OPT
 )
 from ibm_watsonx_orchestrate.client.client import Client
 from ibm_watsonx_orchestrate.client.client_errors import ClientError

@@ -131,7 +132,7 @@ def _login(name: str, apikey: str = None, username: str = None, password: str =
     except ClientError as e:
         raise ClientError(e)
 
-def activate(name: str, apikey: str=None, username: str=None, password: str=None, registry: RegistryType=None, test_package_version_override=None) -> None:
+def activate(name: str, apikey: str=None, username: str=None, password: str=None, registry: RegistryType=None, test_package_version_override=None, skip_version_check=None) -> None:
     cfg = Config()
     auth_cfg = Config(AUTH_CONFIG_FILE_FOLDER, AUTH_CONFIG_FILE)
     env_cfg = cfg.read(ENVIRONMENTS_SECTION_HEADER, name)

@@ -159,6 +160,8 @@ def activate(name: str, apikey: str=None, username: str=None, password: str=None
     elif cfg.read(PYTHON_REGISTRY_HEADER, PYTHON_REGISTRY_TYPE_OPT) is None:
         cfg.write(PYTHON_REGISTRY_HEADER, PYTHON_REGISTRY_TYPE_OPT, DEFAULT_CONFIG_FILE_CONTENT[PYTHON_REGISTRY_HEADER][PYTHON_REGISTRY_TYPE_OPT])
     cfg.write(PYTHON_REGISTRY_HEADER, PYTHON_REGISTRY_TEST_PACKAGE_VERSION_OVERRIDE_OPT, test_package_version_override)
+    if skip_version_check is not None:
+        cfg.write(PYTHON_REGISTRY_HEADER, PYTHON_REGISTRY_SKIP_VERSION_CHECK_OPT, skip_version_check)
 
     logger.info(f"Environment '{name}' is now active")
     is_cpd = is_cpd_env(url)
@@ -54,7 +54,8 @@ def read_env_file(env_path: Path|str) -> dict:
 def validate_watsonx_credentials(user_env_file: str) -> bool:
     required_sets = [
         ["WATSONX_SPACE_ID", "WATSONX_APIKEY"],
-        ["WO_INSTANCE", "WO_API_KEY"]
+        ["WO_INSTANCE", "WO_API_KEY"],
+        ["WO_INSTANCE", "WO_PASSWORD", "WO_USERNAME"]
     ]
 
     def has_valid_keys(env: dict) -> bool:

@@ -75,7 +76,7 @@ def validate_watsonx_credentials(user_env_file: str) -> bool:
     user_env = read_env_file(user_env_file)
 
     if not has_valid_keys(user_env):
-        logger.error("Error: The environment file does not contain the required keys: either WATSONX_SPACE_ID and WATSONX_APIKEY or WO_INSTANCE and WO_API_KEY.")
+        logger.error("Error: The environment file does not contain the required keys: either WATSONX_SPACE_ID and WATSONX_APIKEY or WO_INSTANCE and WO_API_KEY or WO_INSTANCE and WO_USERNAME and WO_PASSWORD.")
         sys.exit(1)
 
     # Update os.environ with whichever set is present
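To make the new username/password credential set concrete, here is a minimal sketch of the "any complete set" check that `has_valid_keys` performs against the parsed env file; only the key names come from the diff, the implementation and sample values below are illustrative:

```python
REQUIRED_SETS = [
    ["WATSONX_SPACE_ID", "WATSONX_APIKEY"],
    ["WO_INSTANCE", "WO_API_KEY"],
    ["WO_INSTANCE", "WO_PASSWORD", "WO_USERNAME"],
]

def has_valid_keys(env: dict) -> bool:
    # Valid when at least one required set is fully present with non-empty values.
    return any(all(env.get(key) for key in keys) for keys in REQUIRED_SETS)

print(has_valid_keys({"WO_INSTANCE": "https://example.test", "WO_USERNAME": "me", "WO_PASSWORD": "secret"}))  # True
print(has_valid_keys({"WO_INSTANCE": "https://example.test"}))  # False
```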
@@ -52,7 +52,7 @@ class EvaluationsController:
 
         if "WATSONX_SPACE_ID" in os.environ and "WATSONX_APIKEY" in os.environ:
             provider = "watsonx"
-        elif "WO_INSTANCE" in os.environ and "WO_API_KEY" in os.environ:
+        elif "WO_INSTANCE" in os.environ and ("WO_API_KEY" in os.environ or "WO_PASSWORD" in os.environ):
             provider = "model_proxy"
         else:
             logger.error(
@@ -101,10 +101,11 @@ PROVIDER_EXTRA_PROPERTIES_LUT = {
 PROVIDER_REQUIRED_FIELDS = {k:['api_key'] for k in ModelProvider}
 # Update required fields for each provider
 # Use sets to denote when a requirement is 'or'
+# Tuples denote combined requirements like 'and'
 PROVIDER_REQUIRED_FIELDS.update({
     ModelProvider.WATSONX: PROVIDER_REQUIRED_FIELDS[ModelProvider.WATSONX] + [{'watsonx_space_id', 'watsonx_project_id', 'watsonx_deployment_id'}],
     ModelProvider.OLLAMA: PROVIDER_REQUIRED_FIELDS[ModelProvider.OLLAMA] + ['custom_host'],
-    ModelProvider.BEDROCK: [],
+    ModelProvider.BEDROCK: [{'api_key', ('aws_secret_access_key', 'aws_access_key_id')}],
 })
 
 # def env_file_to_model_ProviderConfig(model_name: str, env_file_path: str) -> ProviderConfig | None:
@@ -158,16 +159,34 @@ def _validate_extra_fields(provider: ModelProvider, cfg: ProviderConfig) -> None
         if cfg.__dict__.get(attr) is not None and attr not in accepted_fields:
             logger.warning(f"The config option '{attr}' is not used by provider '{provider}'")
 
+def _check_credential_provided(cred: str | tuple, provided_creds: set) -> bool:
+    if isinstance(cred, tuple):
+        return all(item in provided_creds for item in cred)
+    else:
+        return cred in provided_creds
+
+def _format_missing_credential(missing_credential: set) -> str:
+    parts = []
+    for cred in missing_credential:
+        if isinstance(cred, tuple):
+            formatted = " and ".join(f"{x}" for x in cred)
+            parts.append(f"({formatted})")
+        else:
+            parts.append(f"{cred}")
+
+    return " or ".join(parts)
+
+
 def _validate_requirements(provider: ModelProvider, cfg: ProviderConfig, app_id: str = None) -> None:
     provided_credentials = set([k for k,v in dict(cfg).items() if v is not None])
     required_creds = PROVIDER_REQUIRED_FIELDS[provider]
     missing_credentials = []
     for cred in required_creds:
         if isinstance(cred, set):
-            if not any(c
+            if not any(_check_credential_provided(c, provided_credentials) for c in cred):
                 missing_credentials.append(cred)
         else:
-            if
+            if not _check_credential_provided(cred, provided_credentials):
                 missing_credentials.append(cred)
 
     if len(missing_credentials) > 0:

@@ -177,7 +196,7 @@ def _validate_requirements(provider: ModelProvider, cfg: ProviderConfig, app_id:
         missing_credentials_string = f"Be sure to include the following required fields for provider '{provider}' in the connection '{app_id}':"
         for cred in missing_credentials:
             if isinstance(cred, set):
-                cred_str =
+                cred_str = _format_missing_credential(cred)
             else:
                 cred_str = cred
             missing_credentials_string += f"\n\t - {cred_str}"
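The Bedrock entry above mixes both notations: a set is satisfied by any one of its members, and a tuple inside a set only counts when all of its members are present. A short walk-through reusing `_check_credential_provided` from this diff, with hypothetical provided credentials:

```python
def _check_credential_provided(cred, provided_creds):
    # Tuples form an 'and' group; plain strings are single credentials.
    if isinstance(cred, tuple):
        return all(item in provided_creds for item in cred)
    return cred in provided_creds

# Bedrock requirement from the diff: api_key OR (aws_secret_access_key AND aws_access_key_id)
required_creds = [{"api_key", ("aws_secret_access_key", "aws_access_key_id")}]
provided_credentials = {"aws_secret_access_key", "aws_access_key_id"}

missing = [
    cred for cred in required_creds
    if isinstance(cred, set)
    and not any(_check_credential_provided(c, provided_credentials) for c in cred)
]
print(missing)  # [] -> the AWS key pair satisfies the 'or' group even without api_key
```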
@@ -23,6 +23,7 @@ from ibm_watsonx_orchestrate.client.connections import get_connections_client
 from ibm_watsonx_orchestrate.agent_builder.connections.types import ConnectionEnvironment
 from ibm_watsonx_orchestrate.cli.commands.connections.connections_controller import export_connection
 from ibm_watsonx_orchestrate.cli.commands.tools.tools_controller import ToolsController
+from ibm_watsonx_orchestrate.utils.utils import sanitize_catalog_label
 from .types import *
 
 APPLICATIONS_FILE_VERSION = '1.16.0'

@@ -54,7 +55,7 @@ def get_tool_bindings(tool_names: list[str]) -> dict[str, dict]:
 
     return results
 
-def _patch_agent_yamls(project_root: Path, publisher_name: str):
+def _patch_agent_yamls(project_root: Path, publisher_name: str, parent_agent_name: str):
     agents_dir = project_root / "agents"
     if not agents_dir.exists():
         return

@@ -70,15 +71,18 @@ def _patch_agent_yamls(project_root: Path, publisher_name: str):
         if "language_support" not in agent_data:
             agent_data["language_support"] = ["English"]
         if "icon" not in agent_data:
-            agent_data["icon"] =
+            agent_data["icon"] = AGENT_CATALOG_ONLY_PLACEHOLDERS['icon']
         if "category" not in agent_data:
             agent_data["category"] = "agent"
         if "supported_apps" not in agent_data:
             agent_data["supported_apps"] = []
+        if "agent_role" not in agent_data:
+            agent_data["agent_role"] = "manager" if agent_data.get("name") == parent_agent_name else "collaborator"
 
         with open(agent_yaml, "w") as f:
             yaml.safe_dump(agent_data, f, sort_keys=False)
 
+
 def _create_applications_entry(connection_config: dict) -> dict:
     return {
         'app_id': connection_config.get('app_id'),
@@ -116,6 +120,15 @@ class PartnersOfferingController:
             sys.exit(1)
 
     def create(self, offering: str, publisher_name: str, agent_type: str, agent_name: str):
+
+        # Sanitize offering name
+        original_offering = offering
+        offering = sanitize_catalog_label(offering)
+
+        if offering != original_offering:
+            logger.warning("Offering name must contain only alpahnumeric characters or underscore")
+            logger.info(f"Offering '{original_offering}' has been updated to '{offering}'")
+
         # Create parent project folder
         project_root = self.root / offering
 

@@ -179,7 +192,7 @@ class PartnersOfferingController:
         output_zip.unlink(missing_ok=True)
 
         # Patch the agent yamls with publisher, tags, icon, etc.
-        _patch_agent_yamls(project_root, publisher_name)
+        _patch_agent_yamls(project_root=project_root, publisher_name=publisher_name, parent_agent_name=agent_name)
 
 
         # Create offering.yaml file -------------------------------------------------------

@@ -337,12 +350,16 @@ class PartnersOfferingController:
                        **agent_data
                    )
                    agent = Agent.model_validate(agent_details)
-                   AgentsController().persist_record(agent=agent)
                case AgentKind.EXTERNAL:
                    agent_details = parse_create_external_args(
                        **agent_data
                    )
                    agent = ExternalAgent.model_validate(agent_details)
+
+           # Placeholder detection
+           for label,placeholder in AGENT_CATALOG_ONLY_PLACEHOLDERS.items():
+               if agent_data.get(label) == placeholder:
+                   logger.warning(f"Placeholder '{label}' detected for agent '{agent_name}', please ensure '{label}' is correct before packaging.")
 
            agent_json_path = f"{top_level_folder}/agents/{agent_name}/config.json"
            zf.writestr(agent_json_path, json.dumps(agent_data, indent=2))
@@ -24,6 +24,10 @@ CATALOG_ONLY_FIELDS = [
     'supported_apps'
 ]
 
+AGENT_CATALOG_ONLY_PLACEHOLDERS = {
+    'icon': "inline-svg-of-icon",
+}
+
 class AgentKind(str, Enum):
     NATIVE = "native"
     EXTERNAL = "external"

@@ -87,21 +91,9 @@ class Offering(BaseModel):
         return values
 
     def validate_ready_for_packaging(self):
-
-
-
-        placholders = False
-        # part numbers
-        if not self.part_number:
-            raise ValueError(f"Offering '{self.name}' does not have valid part numbers")
-
-        for (k,v) in self.part_number.model_dump().items():
-            if v == CATALOG_PLACEHOLDERS['part_number']:
-                logger.warning(f"Placeholder part number detected for platform '{k}', please ensure valid part numbers are entered before packaging.")
-                placholders = True
-
-        if placholders:
-            raise ValueError(f"Offering '{self.name}' cannot be packaged with placeholder values")
+        # Leaving this fn here in case we want to reintroduce validation
+        pass
+
 
 
 
@@ -3,7 +3,7 @@ import typer
 from ibm_watsonx_orchestrate.cli.commands.partners import partners_controller
 from ibm_watsonx_orchestrate.cli.commands.partners.offering.partners_offering_command import partners_offering
 
-partners_app = typer.Typer(no_args_is_help=True
+partners_app = typer.Typer(no_args_is_help=True)
 
 partners_app.add_typer(
     partners_offering,
@@ -48,6 +48,7 @@ def run_compose_lite(
         experimental_with_ibm_telemetry=False,
         with_doc_processing=False,
         with_voice=False,
+        with_connections_ui=False,
         with_langflow=False,
 ) -> None:
     EnvService.prepare_clean_env(final_env_file)

@@ -80,6 +81,8 @@
         profiles.append("docproc")
     if with_voice:
         profiles.append("voice")
+    if with_connections_ui:
+        profiles.append("connections-ui")
     if with_langflow:
         profiles.append("langflow")
 

@@ -358,11 +361,14 @@ def server_start(
         '--with-voice', '-v',
         help='Enable voice controller to interact with the chat via voice channels'
     ),
+    with_connections_ui: bool = typer.Option(
+        False,
+        '--with-connections-ui', '-c',
+        help='Enables connections ui to facilitate OAuth connections and credential management via a UI'),
     with_langflow: bool = typer.Option(
         False,
         '--with-langflow',
-        help='Enable Langflow UI, available at http://localhost:7861'
-        hidden=True
+        help='Enable Langflow UI, available at http://localhost:7861'
     ),
 ):
     cli_config = Config()

@@ -425,6 +431,7 @@
         experimental_with_ibm_telemetry=experimental_with_ibm_telemetry,
         with_doc_processing=with_doc_processing,
         with_voice=with_voice,
+        with_connections_ui=with_connections_ui,
         with_langflow=with_langflow, env_service=env_service)
 
     run_db_migration()

@@ -455,9 +462,10 @@
         logger.info(f"You can access the observability platform Langfuse at http://localhost:3010, username: orchestrate@ibm.com, password: orchestrate")
     if with_doc_processing:
         logger.info(f"Document processing in Flows (Public Preview) has been enabled.")
+    if with_connections_ui:
+        logger.info("Connections UI can be found at http://localhost:3412/connectors")
     if with_langflow:
         logger.info("Langflow has been enabled, the Langflow UI is available at http://localhost:7861")
-
 @server_app.command(name="stop")
 def server_stop(
     user_env_file: str = typer.Option(
@@ -47,11 +47,11 @@ def import_toolkit(
     ] = None,
     url: Annotated[
         Optional[str],
-        typer.Option("--url", "-u", help="The URL of the remote MCP server"
+        typer.Option("--url", "-u", help="The URL of the remote MCP server"),
     ] = None,
     transport: Annotated[
         ToolkitTransportKind,
-        typer.Option("--transport", help="The communication protocol to use for the remote MCP server. Only \"sse\" or \"streamable_http\" supported"
+        typer.Option("--transport", help="The communication protocol to use for the remote MCP server. Only \"sse\" or \"streamable_http\" supported"),
     ] = None,
     tools: Annotated[
         Optional[str],
|