ibm-watsonx-orchestrate 1.12.2__py3-none-any.whl → 1.13.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. ibm_watsonx_orchestrate/__init__.py +1 -1
  2. ibm_watsonx_orchestrate/agent_builder/connections/types.py +34 -3
  3. ibm_watsonx_orchestrate/agent_builder/knowledge_bases/types.py +13 -2
  4. ibm_watsonx_orchestrate/agent_builder/models/types.py +17 -1
  5. ibm_watsonx_orchestrate/agent_builder/toolkits/types.py +14 -2
  6. ibm_watsonx_orchestrate/agent_builder/tools/__init__.py +1 -1
  7. ibm_watsonx_orchestrate/agent_builder/tools/types.py +21 -3
  8. ibm_watsonx_orchestrate/agent_builder/voice_configurations/__init__.py +1 -1
  9. ibm_watsonx_orchestrate/agent_builder/voice_configurations/types.py +11 -0
  10. ibm_watsonx_orchestrate/cli/commands/agents/agents_controller.py +31 -53
  11. ibm_watsonx_orchestrate/cli/commands/connections/connections_command.py +2 -2
  12. ibm_watsonx_orchestrate/cli/commands/connections/connections_controller.py +54 -28
  13. ibm_watsonx_orchestrate/cli/commands/copilot/copilot_command.py +36 -2
  14. ibm_watsonx_orchestrate/cli/commands/copilot/copilot_controller.py +270 -26
  15. ibm_watsonx_orchestrate/cli/commands/copilot/copilot_server_controller.py +4 -4
  16. ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_command.py +30 -3
  17. ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_environment_manager.py +158 -0
  18. ibm_watsonx_orchestrate/cli/commands/knowledge_bases/knowledge_bases_command.py +26 -0
  19. ibm_watsonx_orchestrate/cli/commands/knowledge_bases/knowledge_bases_controller.py +150 -34
  20. ibm_watsonx_orchestrate/cli/commands/models/models_command.py +2 -2
  21. ibm_watsonx_orchestrate/cli/commands/models/models_controller.py +29 -10
  22. ibm_watsonx_orchestrate/cli/commands/server/server_command.py +50 -18
  23. ibm_watsonx_orchestrate/cli/commands/toolkit/toolkit_controller.py +139 -27
  24. ibm_watsonx_orchestrate/cli/commands/tools/tools_command.py +2 -2
  25. ibm_watsonx_orchestrate/cli/commands/tools/tools_controller.py +43 -29
  26. ibm_watsonx_orchestrate/cli/commands/voice_configurations/voice_configurations_controller.py +23 -11
  27. ibm_watsonx_orchestrate/cli/common.py +26 -0
  28. ibm_watsonx_orchestrate/cli/config.py +30 -1
  29. ibm_watsonx_orchestrate/client/agents/agent_client.py +1 -1
  30. ibm_watsonx_orchestrate/client/connections/connections_client.py +1 -14
  31. ibm_watsonx_orchestrate/client/copilot/cpe/copilot_cpe_client.py +55 -11
  32. ibm_watsonx_orchestrate/client/knowledge_bases/knowledge_base_client.py +6 -2
  33. ibm_watsonx_orchestrate/client/model_policies/model_policies_client.py +1 -1
  34. ibm_watsonx_orchestrate/client/models/models_client.py +1 -1
  35. ibm_watsonx_orchestrate/client/threads/threads_client.py +34 -0
  36. ibm_watsonx_orchestrate/client/tools/tempus_client.py +4 -2
  37. ibm_watsonx_orchestrate/client/utils.py +29 -7
  38. ibm_watsonx_orchestrate/docker/compose-lite.yml +3 -2
  39. ibm_watsonx_orchestrate/docker/default.env +15 -10
  40. ibm_watsonx_orchestrate/flow_builder/flows/flow.py +28 -12
  41. ibm_watsonx_orchestrate/flow_builder/types.py +25 -0
  42. ibm_watsonx_orchestrate/flow_builder/utils.py +1 -9
  43. ibm_watsonx_orchestrate/utils/async_helpers.py +31 -0
  44. ibm_watsonx_orchestrate/utils/docker_utils.py +1177 -33
  45. ibm_watsonx_orchestrate/utils/environment.py +165 -20
  46. ibm_watsonx_orchestrate/utils/exceptions.py +1 -1
  47. ibm_watsonx_orchestrate/utils/tokens.py +51 -0
  48. ibm_watsonx_orchestrate/utils/utils.py +57 -2
  49. {ibm_watsonx_orchestrate-1.12.2.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/METADATA +2 -2
  50. {ibm_watsonx_orchestrate-1.12.2.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/RECORD +53 -48
  51. {ibm_watsonx_orchestrate-1.12.2.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/WHEEL +0 -0
  52. {ibm_watsonx_orchestrate-1.12.2.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/entry_points.txt +0 -0
  53. {ibm_watsonx_orchestrate-1.12.2.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/licenses/LICENSE +0 -0
@@ -87,7 +87,8 @@ def _check_if_auth_config_file(folder, file):
87
87
 
88
88
  def clear_protected_env_credentials_token():
89
89
  auth_cfg = Config(config_file_folder=AUTH_CONFIG_FILE_FOLDER, config_file=AUTH_CONFIG_FILE)
90
- auth_cfg.delete(AUTH_SECTION_HEADER, PROTECTED_ENV_NAME, AUTH_MCSP_TOKEN_OPT)
90
+ if auth_cfg.exists(AUTH_SECTION_HEADER, PROTECTED_ENV_NAME, AUTH_MCSP_TOKEN_OPT):
91
+ auth_cfg.delete(AUTH_SECTION_HEADER, PROTECTED_ENV_NAME, AUTH_MCSP_TOKEN_OPT)
91
92
 
92
93
 
93
94
  class ConfigFileTypes(str, Enum):
@@ -232,3 +233,31 @@ class Config:
232
233
 
233
234
  with open(self.config_file_path, 'w') as conf_file:
234
235
  yaml.dump(deletion_data, conf_file, allow_unicode=True)
236
+
237
+ def exists(self, *args) -> bool:
238
+ """
239
+ Determines if an item of arbitrary depth exists in the config file.
240
+ Takes an arbitrary number of args. Uses the args in order
241
+ as keys to access deeper sections of the config and then checking whether the last specified key exists.
242
+ """
243
+ if len(args) < 1:
244
+ raise BadRequest("Config.exists() requires at least one positional argument")
245
+
246
+ config_data = {}
247
+ try:
248
+ with open(self.config_file_path, 'r') as conf_file:
249
+ config_data = yaml_safe_load(conf_file) or {}
250
+ except FileNotFoundError:
251
+ pass
252
+
253
+ expression = "config_data"
254
+ for key in args:
255
+ temp = eval(expression)
256
+
257
+ if not isinstance(temp, dict) or key not in temp:
258
+ return False
259
+
260
+ else:
261
+ expression += f"['{key}']"
262
+
263
+ return True
@@ -131,7 +131,7 @@ class AgentClient(BaseAPIClient):
131
131
  agent = transform_agents_to_flat_agent_spec(self._get(f"{self.base_endpoint}/{agent_id}"))
132
132
  return agent
133
133
  except ClientAPIException as e:
134
- if e.response.status_code == 404 and "not found with the given name" in e.response.text:
134
+ if e.response.status_code == 404 and ("not found with the given name" in e.response.text or ("Agent" in e.response.text and "not found" in e.response.text)):
135
135
  return ""
136
136
  raise(e)
137
137
 
@@ -3,27 +3,14 @@ from typing import List
3
3
  from ibm_cloud_sdk_core.authenticators import MCSPAuthenticator
4
4
  from pydantic import BaseModel, ValidationError
5
5
  from typing import Optional
6
- from enum import Enum
7
6
 
8
7
  from ibm_watsonx_orchestrate.client.base_api_client import BaseAPIClient, ClientAPIException
9
- from ibm_watsonx_orchestrate.agent_builder.connections.types import ConnectionEnvironment, ConnectionPreference, ConnectionConfiguration, ConnectionAuthType, ConnectionSecurityScheme, IdpConfigData, AppConfigData, ConnectionType
8
+ from ibm_watsonx_orchestrate.agent_builder.connections.types import ConnectionEnvironment, ConnectionPreference, ConnectionConfiguration, ConnectionAuthType, ConnectionSecurityScheme, IdpConfigData, AppConfigData, ConnectionType, FetchConfigAuthTypes
10
9
  from ibm_watsonx_orchestrate.client.utils import is_cpd_env, is_local_dev
11
10
 
12
11
  import logging
13
12
  logger = logging.getLogger(__name__)
14
13
 
15
-
16
- class FetchConfigAuthTypes(str, Enum):
17
- BASIC_AUTH = ConnectionType.BASIC_AUTH.value
18
- BEARER_TOKEN = ConnectionType.BEARER_TOKEN.value
19
- API_KEY_AUTH = ConnectionType.API_KEY_AUTH.value
20
- OAUTH2_AUTH_CODE = ConnectionType.OAUTH2_AUTH_CODE.value
21
- OAUTH2_IMPLICIT = 'oauth2_implicit'
22
- OAUTH2_PASSWORD = ConnectionType.OAUTH2_PASSWORD.value
23
- OAUTH2_CLIENT_CREDS = ConnectionType.OAUTH2_CLIENT_CREDS.value
24
- OAUTH_ON_BEHALF_OF_FLOW = ConnectionType.OAUTH_ON_BEHALF_OF_FLOW.value
25
- KEY_VALUE = ConnectionType.KEY_VALUE.value
26
-
27
14
  class ListConfigsResponse(BaseModel):
28
15
  connection_id: str = None,
29
16
  app_id: str = None
@@ -1,4 +1,4 @@
1
- from typing import Dict, Any
1
+ from typing import Dict, Any, List
2
2
  from uuid import uuid4
3
3
 
4
4
  from ibm_watsonx_orchestrate.client.base_api_client import BaseAPIClient
@@ -13,22 +13,26 @@ class CPEClient(BaseAPIClient):
13
13
  self.chat_id = str(uuid4())
14
14
  super().__init__(*args, **kwargs)
15
15
  self.base_url = kwargs.get("base_url", self.base_url)
16
- self.chat_model_name = 'llama-3-3-70b-instruct'
17
16
 
18
17
  def _get_headers(self) -> dict:
19
18
  return {
20
19
  "chat_id": self.chat_id
21
20
  }
22
21
 
22
+ def _get_chat_model_name_or_default(self, chat_nodel_name):
23
+ if chat_nodel_name:
24
+ return chat_nodel_name
25
+ return 'watsonx/meta-llama/llama-3-3-70b-instruct'
23
26
 
24
- def submit_pre_cpe_chat(self, user_message: str | None =None, tools: Dict[str, Any] = None, collaborators: Dict[str, Any] = None, knowledge_bases: Dict[str, Any] = None, selected:bool=False) -> dict:
27
+ def submit_pre_cpe_chat(self, chat_llm: str |None, user_message: str | None =None,
28
+ tools: Dict[str, Any] = None, collaborators: Dict[str, Any] = None, knowledge_bases: Dict[str, Any] = None, selected:bool=False) -> dict:
25
29
  payload = {
26
30
  "message": user_message,
27
31
  "tools": tools,
28
32
  "collaborators": collaborators,
29
33
  "knowledge_bases": knowledge_bases,
30
34
  "chat_id": self.chat_id,
31
- "chat_model_name": self.chat_model_name,
35
+ "chat_model_name": self._get_chat_model_name_or_default(chat_llm),
32
36
  'selected':selected
33
37
  }
34
38
 
@@ -37,31 +41,71 @@ class CPEClient(BaseAPIClient):
37
41
  if response:
38
42
  return response[-1]
39
43
 
44
+ def refine_agent_with_chats(self, instruction: str, chat_llm: str | None,
45
+ tools: Dict[str, Any], collaborators: Dict[str, Any], knowledge_bases: Dict[str, Any], trajectories_with_feedback: List[List[dict]], model: str | None = None) -> dict:
46
+ """
47
+ Refines an agent's instruction using provided chat trajectories and optional model name.
48
+ This method sends a payload containing the agent's current instruction and a list of chat trajectories
49
+ to the Copilot Prompt Engine (CPE) for refinement.
50
+ Optionally, a target model name can be specified to use in the refinement process.
51
+ Parameters:
52
+ instruction (str): The current instruction or prompt associated with the agent.
53
+ chat_llm (str): The name of the chat model.
54
+ tools (Dict[str, Any]) - a dictionary containing the selected tools
55
+ collaborators (Dict[str, Any]) - a dictionary containing the selected collaborators
56
+ knowledge_bases (Dict[str, Any]) - a dictionary containing the selected knowledge_bases
57
+ trajectories_with_feedback (List[List[dict]]): A list of chat trajectories, where each trajectory is a list
58
+ of message dictionaries that may include user feedback.
59
+ model (str | None): Optional. The name of the model to use for refinement.
60
+ Returns:
61
+ dict: The last response from the CPE containing the refined instruction.
62
+ """
40
63
 
41
- def init_with_context(self, model: str | None = None, context_data: Dict[str, Any] = None) -> dict:
42
64
  payload = {
43
- "context_data": context_data,
44
- "chat_id": self.chat_id
65
+ "trajectories_with_feedback":trajectories_with_feedback,
66
+ "instruction":instruction,
67
+ "tools": tools,
68
+ "collaborators": collaborators,
69
+ "knowledge_bases": knowledge_bases,
70
+ "chat_model_name": self._get_chat_model_name_or_default(chat_llm),
45
71
  }
46
72
 
47
73
  if model:
48
74
  payload["target_model_name"] = model
49
75
 
76
+ response = self._post_nd_json("/wxo-cpe/refine-agent-with-trajectories", data=payload)
77
+
78
+ if response:
79
+ return response[-1]
80
+
81
+ def init_with_context(self, chat_llm: str | None,
82
+ target_model_name: str | None = None, context_data: Dict[str, Any] = None) -> dict:
83
+ payload = {
84
+ "context_data": context_data,
85
+ "chat_id": self.chat_id,
86
+ "chat_model_name": self._get_chat_model_name_or_default(chat_llm),
87
+ }
88
+
89
+ if target_model_name:
90
+ payload["target_model_name"] = target_model_name
91
+
50
92
  response = self._post_nd_json("/wxo-cpe/init_cpe_from_wxo", data=payload)
51
93
 
52
94
  if response:
53
95
  return response[-1]
54
96
 
55
97
 
56
- def invoke(self, prompt: str, model: str | None = None, context_data: Dict[str, Any] = None) -> dict:
98
+ def invoke(self, prompt: str, chat_llm: str| None,
99
+ target_model_name: str | None = None, context_data: Dict[str, Any] = None) -> dict:
57
100
  payload = {
58
101
  "prompt": prompt,
59
102
  "context_data": context_data,
60
- "chat_id": self.chat_id
103
+ "chat_id": self.chat_id,
104
+ "chat_model_name": self._get_chat_model_name_or_default(chat_llm),
61
105
  }
62
106
 
63
- if model:
64
- payload["target_model_name"] = model
107
+ if target_model_name:
108
+ payload["target_model_name"] = target_model_name
65
109
 
66
110
  response = self._post_nd_json("/wxo-cpe/invoke", data=payload)
67
111
 
@@ -30,8 +30,12 @@ class KnowledgeBaseClient(BaseAPIClient):
30
30
  def get_by_id(self, knowledge_base_id: str) -> dict:
31
31
  return self._get(f"{self.base_endpoint}/{knowledge_base_id}")
32
32
 
33
- def get_by_names(self, name: List[str]) -> List[dict]:
34
- formatted_names = [f"names={x}" for x in name]
33
+ def get_by_names(self, names: List[str]) -> List[dict]:
34
+ formatted_names = [f"names={x}" for x in names]
35
+ return self._get(f"{self.base_endpoint}?{'&'.join(formatted_names)}")
36
+
37
+ def get_by_ids(self, ids: List[str]) -> List[dict]:
38
+ formatted_names = [f"ids={x}" for x in ids]
35
39
  return self._get(f"{self.base_endpoint}?{'&'.join(formatted_names)}")
36
40
 
37
41
  def status(self, knowledge_base_id: str) -> dict:
@@ -58,7 +58,7 @@ class ModelPoliciesClient(BaseAPIClient):
58
58
  return []
59
59
  raise e
60
60
 
61
- def get_draft_by_name(self, policy_name: str) -> ModelPolicy:
61
+ def get_draft_by_name(self, policy_name: str) -> List[ModelPolicy]:
62
62
  return self.get_drafts_by_names([policy_name])
63
63
 
64
64
 
@@ -60,7 +60,7 @@ class ModelsClient(BaseAPIClient):
60
60
  return []
61
61
  raise e
62
62
 
63
- def get_draft_by_name(self, model_name: str) -> ListVirtualModel:
63
+ def get_draft_by_name(self, model_name: str) -> List[ListVirtualModel]:
64
64
  return self.get_drafts_by_names([model_name])
65
65
 
66
66
 
@@ -0,0 +1,34 @@
1
+ from ibm_watsonx_orchestrate.client.base_api_client import BaseAPIClient
2
+
3
+
4
+ class ThreadsClient(BaseAPIClient):
5
+ """
6
+ Client to handle read operations for Threads (chat history / trajectories) endpoints
7
+ """
8
+
9
+ def __init__(self, *args, **kwargs):
10
+ super().__init__(*args, **kwargs)
11
+ self.base_endpoint = "/threads"
12
+
13
+ def get_all_threads(self, agent_id) -> dict:
14
+ return self._get(self.base_endpoint, params={"agent_id": agent_id})
15
+
16
+ def get_thread_messages(self, thread_id) -> dict:
17
+ return self._get(f"{self.base_endpoint}/{thread_id}/messages")
18
+
19
+ def get(self) -> dict:
20
+ return self._get(self.base_endpoint)
21
+
22
+ def get_threads_messages(self, thread_ids: list[str]):
23
+ """
24
+ get the messages for a list of threads (chats) ids
25
+ :param thread_ids:
26
+ :param threads_client:
27
+ :return:
28
+ """
29
+ all_thread_messages = []
30
+ for thread_id in thread_ids:
31
+ thread_messages = self.get_thread_messages(thread_id=thread_id)
32
+ all_thread_messages.append(thread_messages)
33
+
34
+ return all_thread_messages
@@ -12,7 +12,7 @@ class TempusClient(BaseAPIClient):
12
12
  This may be temporary and may want to create a proxy API in wxo-server
13
13
  to redirect to the internal tempus runtime, and add a new operation in the ToolClient instead
14
14
  """
15
- def __init__(self, base_url: str, api_key: str = None, is_local: bool = False, authenticator: MCSPAuthenticator = None):
15
+ def __init__(self, base_url: str, api_key: str = None, is_local: bool = False, authenticator: MCSPAuthenticator = None, *args, **kwargs):
16
16
  parsed_url = urlparse(base_url)
17
17
 
18
18
  # Reconstruct netloc with new port - use default above - eventually we need to open up a way through the wxo-server API
@@ -26,7 +26,9 @@ class TempusClient(BaseAPIClient):
26
26
  base_url=new_url,
27
27
  api_key=api_key,
28
28
  is_local=is_local,
29
- authenticator=authenticator
29
+ authenticator=authenticator,
30
+ *args,
31
+ **kwargs
30
32
  )
31
33
 
32
34
  def get_tempus_endpoint(self) -> str:
@@ -175,23 +175,24 @@ def instantiate_client(client: type[T] , url: str | None=None) -> T:
175
175
  raise FileNotFoundError(message)
176
176
 
177
177
 
178
+ def get_arm_architectures () -> list[str]:
179
+ # NOTE: intentionally omitting 32 bit arm architectures.
180
+ return ["aarch64", "arm64", "arm", "aarch64_be", "armv8b", "armv8l"]
181
+
182
+
178
183
  def get_architecture () -> str:
179
184
  arch = platform.machine().lower()
180
185
  if arch in ("amd64", "x86_64"):
181
186
  return "amd64"
182
-
183
- elif arch == "i386":
184
- return "386"
185
-
186
- elif arch in ("aarch64", "arm64", "arm"):
187
- return "arm"
187
+ elif arch in get_arm_architectures():
188
+ return arch
188
189
 
189
190
  else:
190
191
  raise Exception("Unsupported architecture %s" % arch)
191
192
 
192
193
 
193
194
  def is_arm_architecture () -> bool:
194
- return platform.machine().lower() in ("aarch64", "arm64", "arm")
195
+ return platform.machine().lower() in get_arm_architectures()
195
196
 
196
197
 
197
198
  def get_os_type () -> str:
@@ -201,3 +202,24 @@ def get_os_type () -> str:
201
202
 
202
203
  else:
203
204
  raise Exception("Unsupported operating system %s" % system)
205
+
206
+ def concat_bin_files(target_bin_file: str, source_files: list[str], read_chunk_size: int = None,
207
+ delete_source_files_post: bool = True) -> None:
208
+ if read_chunk_size is None:
209
+ # default read chunk size is 100 MB.
210
+ read_chunk_size = 100 * 1024 * 1024
211
+
212
+ with open(target_bin_file, "wb") as target:
213
+ for source_file in source_files:
214
+ with open(source_file, "rb") as source:
215
+ while True:
216
+ source_chunk = source.read(read_chunk_size)
217
+
218
+ if source_chunk:
219
+ target.write(source_chunk)
220
+
221
+ else:
222
+ break
223
+
224
+ if delete_source_files_post is True:
225
+ os.remove(source_file)
@@ -175,6 +175,7 @@ services:
175
175
  ENABLE_EDIT_PROMPTS: "true"
176
176
  LANGFLOW_ENABLED: ${LANGFLOW_ENABLED:-false}
177
177
  ENABLE_EMBED_SCRIPT: "true"
178
+ ENABLE_BULK_TESTING: "true"
178
179
  command: 'npm start'
179
180
  ports:
180
181
  - "4025:4025"
@@ -241,7 +242,7 @@ services:
241
242
  - "wxo-server-minio"
242
243
 
243
244
  wxo-milvus-etcd:
244
- image: quay.io/coreos/etcd:v3.5.18
245
+ image: ${ETCD_REGISTRY:-quay.io}/coreos/etcd:${ETCD_TAG:-v3.5.18}
245
246
  environment:
246
247
  - ETCD_AUTO_COMPACTION_MODE=revision
247
248
  - ETCD_AUTO_COMPACTION_RETENTION=1000
@@ -580,7 +581,7 @@ services:
580
581
  # IBM AGENT-OPS
581
582
  ########################
582
583
  elasticsearch:
583
- image: docker.elastic.co/elasticsearch/elasticsearch:8.19.0
584
+ image: ${ELASTICSEARCH_REGISTRY:-docker.elastic.co}/elasticsearch/elasticsearch:${ELASTICSEARCH_TAG:-8.19.0}
584
585
  profiles: [ibm-telemetry]
585
586
  environment:
586
587
  - discovery.type=single-node
@@ -73,13 +73,12 @@ AGENT_GATEWAY_REGISTRY=
73
73
  DB_REGISTRY=
74
74
  # If you build multiarch set all three of these to the same, we have a pr against main
75
75
  # to not have this separation, but we can merge it later
76
- DBTAG=17-09-2025-8a9aff2
77
- AMDDBTAG=17-09-2025-8a9aff2
78
- ARM64DBTAG=17-09-2025-8a9aff2
76
+ DBTAG=06-10-2025-87419d6
77
+ AMDDBTAG=06-10-2025-87419d6
78
+ ARM64DBTAG=06-10-2025-87419d6
79
79
 
80
80
  UI_REGISTRY=
81
- UITAG=22-09-2025-e35f498
82
-
81
+ UITAG=10-10-2025-f91ab60
83
82
  CM_REGISTRY=
84
83
  CM_TAG=16-09-2025-e33b344
85
84
 
@@ -92,10 +91,10 @@ TRM_REGISTRY=
92
91
  TR_TAG=24-09-2025-a515038
93
92
  TR_REGISTRY=
94
93
 
95
- BUILDER_TAG=24-09-2025-f9b68d8
94
+ BUILDER_TAG=06-10-2025-6387930
96
95
  BUILDER_REGISTRY=
97
96
 
98
- FLOW_RUNTIME_TAG=22-09-2025-0bd3f58
97
+ FLOW_RUNTIME_TAG=06-10-2025-1bc69ec
99
98
  FLOW_RUMTIME_REGISTRY=
100
99
 
101
100
 
@@ -108,13 +107,13 @@ JAEGER_PROXY_REGISTRY=
108
107
  SOCKET_HANDLER_TAG=29-05-2025
109
108
  SOCKET_HANDLER_REGISTRY=
110
109
 
111
- CPE_TAG=29-08-2025-e612bea
110
+ CPE_TAG=06-10-2025-74e5ca0
112
111
  CPE_REGISTRY=
113
112
 
114
113
  VOICE_CONTROLLER_TAG=12-09-2025-0e04772
115
114
  VOICE_CONTROLLER_REGISTRY=
116
115
 
117
- LANGFLOW_TAG=1.5.0.post2
116
+ LANGFLOW_TAG=
118
117
  LANGFLOW_IMAGE=
119
118
 
120
119
  # IBM Document Processing
@@ -124,9 +123,15 @@ WDU_REGISTRY=
124
123
  DOCPROC_DPS_TAG=20250910-165658-290-c566031
125
124
  DOCPROC_LLMSERVICE_TAG=20250915-main-139-7a36ad3
126
125
  DOCPROC_CACHE_TAG=20250916-master-86-454157f
127
- DOCPROC_DPI_TAG=20250910-214413-288-a348dfd9
126
+ DOCPROC_DPI_TAG=20250926-122324-302-cb9b3a46
128
127
  DOCPROC_REGISTRY=
129
128
 
129
+ ETCD_TAG=
130
+ ETCD_REGISTRY=
131
+
132
+ ELASTICSEARCH_TAG=
133
+ ELASTICSEARCH_REGISTRY=
134
+
130
135
  # END -- IMAGE REGISTRIES AND TAGS
131
136
 
132
137
  CPD_VERIFY=true
@@ -26,7 +26,9 @@ from ibm_watsonx_orchestrate.client.tools.tool_client import ToolClient
26
26
  from ibm_watsonx_orchestrate.client.tools.tempus_client import TempusClient
27
27
  from ibm_watsonx_orchestrate.client.utils import instantiate_client
28
28
  from ..types import (
29
- DocProcKVPSchema, Assignment, Conditions, EndNodeSpec, Expression, ForeachPolicy, ForeachSpec, LoopSpec, BranchNodeSpec, MatchPolicy, NodeIdCondition, PlainTextReadingOrder, PromptExample, PromptLLMParameters, PromptNodeSpec, ScriptNodeSpec, TimerNodeSpec,
29
+ DocProcKVPSchema, Assignment, Conditions, EndNodeSpec, Expression, ForeachPolicy, ForeachSpec, LoopSpec, BranchNodeSpec, MatchPolicy,
30
+ NodeIdCondition, PlainTextReadingOrder, PromptExample, PromptLLMParameters, PromptNodeSpec, ScriptNodeSpec, TimerNodeSpec,
31
+ NodeErrorHandlerConfig, NodeIdCondition, PlainTextReadingOrder, PromptExample, PromptLLMParameters, PromptNodeSpec,
30
32
  StartNodeSpec, ToolSpec, JsonSchemaObject, ToolRequestBody, ToolResponseBody, UserFieldKind, UserFieldOption, UserFlowSpec, UserNodeSpec, WaitPolicy,
31
33
  DocProcSpec, TextExtractionResponse, DocProcInput, DecisionsNodeSpec, DecisionsRule, DocExtSpec, File, DocumentClassificationResponse, DocClassifierSpec, DocumentProcessingCommonInput
32
34
  )
@@ -269,7 +271,8 @@ class Flow(Node):
269
271
 
270
272
  def _create_node_from_tool_fn(
271
273
  self,
272
- tool: Callable
274
+ tool: Callable,
275
+ error_handler_config: Optional[NodeErrorHandlerConfig] = None
273
276
  ) -> ToolNode:
274
277
  if not isinstance(tool, Callable):
275
278
  raise ValueError("Only functions with @tool decorator can be added.")
@@ -292,7 +295,8 @@ class Flow(Node):
292
295
  input_schema = tool_spec.input_schema,
293
296
  output_schema = tool_spec.output_schema,
294
297
  output_schema_object = spec.output_schema_object,
295
- tool = tool_spec.name)
298
+ tool = tool_spec.name,
299
+ error_handler_config = error_handler_config,)
296
300
 
297
301
  return ToolNode(spec=toolnode_spec)
298
302
 
@@ -302,14 +306,18 @@ class Flow(Node):
302
306
  name: str | None = None,
303
307
  display_name: str | None = None,
304
308
  description: str | None = None,
305
-
306
309
  input_schema: type[BaseModel] | None = None,
307
310
  output_schema: type[BaseModel] | None = None,
308
- input_map: DataMap = None
311
+ input_map: DataMap = None,
312
+ error_handler_config: NodeErrorHandlerConfig | None = None
309
313
  ) -> ToolNode:
310
314
  '''create a tool node in the flow'''
311
315
  if tool is None:
312
316
  raise ValueError("tool must be provided")
317
+
318
+
319
+ if isinstance(error_handler_config, dict):
320
+ error_handler_config = NodeErrorHandlerConfig.model_validate(error_handler_config)
313
321
 
314
322
  if isinstance(tool, str):
315
323
  name = name if name is not None and name != "" else tool
@@ -338,14 +346,16 @@ class Flow(Node):
338
346
  input_schema= _get_tool_request_body(input_schema_obj),
339
347
  output_schema= _get_tool_response_body(output_schema_obj),
340
348
  output_schema_object = output_schema_obj,
341
- tool = tool)
349
+ tool = tool,
350
+ error_handler_config = error_handler_config
351
+ )
342
352
 
343
353
  node = ToolNode(spec=toolnode_spec)
344
354
  elif isinstance(tool, PythonTool):
345
355
  if callable(tool):
346
356
  tool_spec = getattr(tool, "__tool_spec__", None)
347
357
  if tool_spec:
348
- node = self._create_node_from_tool_fn(tool)
358
+ node = self._create_node_from_tool_fn(tool, error_handler_config = error_handler_config)
349
359
  else:
350
360
  raise ValueError("Only functions with @tool decorator can be added.")
351
361
  else:
@@ -458,7 +468,8 @@ class Flow(Node):
458
468
  description: str | None = None,
459
469
  input_schema: type[BaseModel]|None = None,
460
470
  output_schema: type[BaseModel]|None=None,
461
- input_map: DataMap = None) -> PromptNode:
471
+ input_map: DataMap = None,
472
+ error_handler_config: NodeErrorHandlerConfig | None = None,) -> PromptNode:
462
473
 
463
474
  if name is None:
464
475
  raise ValueError("name must be provided.")
@@ -477,6 +488,7 @@ class Flow(Node):
477
488
  prompt_examples=prompt_examples,
478
489
  llm=llm,
479
490
  llm_parameters=llm_parameters,
491
+ error_handler_config=error_handler_config,
480
492
  input_schema=_get_tool_request_body(input_schema_obj),
481
493
  output_schema=_get_tool_response_body(output_schema_obj),
482
494
  output_schema_object = output_schema_obj
@@ -499,7 +511,8 @@ class Flow(Node):
499
511
  classes: type[BaseModel]| None = None,
500
512
  description: str | None = None,
501
513
  min_confidence: float = 0.0,
502
- input_map: DataMap = None) -> DocClassifierNode:
514
+ input_map: DataMap = None,
515
+ enable_review: bool = False) -> DocClassifierNode:
503
516
 
504
517
  if name is None :
505
518
  raise ValueError("name must be provided.")
@@ -520,7 +533,8 @@ class Flow(Node):
520
533
  output_schema=_get_tool_response_body(output_schema_obj),
521
534
  output_schema_object = output_schema_obj,
522
535
  config=doc_classifier_config,
523
- version=version
536
+ version=version,
537
+ enable_review=enable_review
524
538
  )
525
539
  node = DocClassifierNode(spec=task_spec)
526
540
 
@@ -572,7 +586,8 @@ class Flow(Node):
572
586
  input_map: DataMap = None,
573
587
  enable_hw: bool = False,
574
588
  min_confidence: float = 0, # Setting a small value because htil is not supported for pro code.
575
- review_fields: List[str] = []) -> tuple[DocExtNode, type[BaseModel]]:
589
+ review_fields: List[str] = [],
590
+ enable_review: bool = False) -> tuple[DocExtNode, type[BaseModel]]:
576
591
 
577
592
  if name is None :
578
593
  raise ValueError("name must be provided.")
@@ -599,7 +614,8 @@ class Flow(Node):
599
614
  version=version,
600
615
  enable_hw=enable_hw,
601
616
  min_confidence=min_confidence,
602
- review_fields=review_fields
617
+ review_fields=review_fields,
618
+ enable_review=enable_review
603
619
  )
604
620
  node = DocExtNode(spec=task_spec)
605
621
 
@@ -256,6 +256,7 @@ class DocProcCommonNodeSpec(NodeSpec):
256
256
  class DocClassifierSpec(DocProcCommonNodeSpec):
257
257
  version : str = Field(description="A version of the spec")
258
258
  config : DocClassifierConfig
259
+ enable_review: bool = Field(description="Indicate if enable human in the loop review", default=False)
259
260
 
260
261
  def __init__(self, **data):
261
262
  super().__init__(**data)
@@ -266,6 +267,7 @@ class DocClassifierSpec(DocProcCommonNodeSpec):
266
267
  model_spec["version"] = self.version
267
268
  model_spec["config"] = self.config.model_dump()
268
269
  model_spec["task"] = DocProcTask.custom_document_classification
270
+ model_spec["enable_review"] = self.enable_review
269
271
  return model_spec
270
272
 
271
273
  class DocExtSpec(DocProcCommonNodeSpec):
@@ -273,6 +275,7 @@ class DocExtSpec(DocProcCommonNodeSpec):
273
275
  config : DocExtConfig
274
276
  min_confidence: float = Field(description="The minimal confidence acceptable for an extracted field value", default=0.0,le=1.0, ge=0.0 ,title="Minimum Confidence")
275
277
  review_fields: List[str] = Field(description="The fields that require user to review", default=[])
278
+ enable_review: bool = Field(description="Enable human in the loop review", default=False)
276
279
 
277
280
  def __init__(self, **data):
278
281
  super().__init__(**data)
@@ -285,6 +288,7 @@ class DocExtSpec(DocProcCommonNodeSpec):
285
288
  model_spec["task"] = DocProcTask.custom_field_extraction
286
289
  model_spec["min_confidence"] = self.min_confidence
287
290
  model_spec["review_fields"] = self.review_fields
291
+ model_spec["enable_review"] = self.enable_review
288
292
  return model_spec
289
293
 
290
294
  class DocProcField(BaseModel):
@@ -374,8 +378,24 @@ class EndNodeSpec(NodeSpec):
374
378
  def __init__(self, **data):
375
379
  super().__init__(**data)
376
380
  self.kind = "end"
381
+
382
+ class NodeErrorHandlerConfig(BaseModel):
383
+ error_message: Optional[str] = None
384
+ max_retries: Optional[int] = None
385
+ retry_interval: Optional[int] = None
386
+
387
+ def to_json(self) -> dict[str, Any]:
388
+ model_spec = {}
389
+ if self.error_message:
390
+ model_spec["error_message"] = self.error_message
391
+ if self.max_retries:
392
+ model_spec["max_retries"] = self.max_retries
393
+ if self.retry_interval:
394
+ model_spec["retry_interval"] = self.retry_interval
395
+ return model_spec
377
396
  class ToolNodeSpec(NodeSpec):
378
397
  tool: Union[str, ToolSpec] = Field(default = None, description="the tool to use")
398
+ error_handler_config: Optional[NodeErrorHandlerConfig] = None
379
399
 
380
400
  def __init__(self, **data):
381
401
  super().__init__(**data)
@@ -383,6 +403,8 @@ class ToolNodeSpec(NodeSpec):
383
403
 
384
404
  def to_json(self) -> dict[str, Any]:
385
405
  model_spec = super().to_json()
406
+ if self.error_handler_config:
407
+ model_spec["error_handler_config"] = self.error_handler_config.to_json()
386
408
  if self.tool:
387
409
  if isinstance(self.tool, ToolSpec):
388
410
  model_spec["tool"] = self.tool.model_dump(exclude_defaults=True, exclude_none=True, exclude_unset=True)
@@ -732,6 +754,7 @@ class PromptNodeSpec(NodeSpec):
732
754
  prompt_examples: Optional[list[PromptExample]]
733
755
  llm: Optional[str]
734
756
  llm_parameters: Optional[PromptLLMParameters]
757
+ error_handler_config: Optional[NodeErrorHandlerConfig]
735
758
 
736
759
  def __init__(self, **kwargs):
737
760
  super().__init__(**kwargs)
@@ -747,6 +770,8 @@ class PromptNodeSpec(NodeSpec):
747
770
  model_spec["llm"] = self.llm
748
771
  if self.llm_parameters:
749
772
  model_spec["llm_parameters"] = self.llm_parameters.to_json()
773
+ if self.error_handler_config:
774
+ model_spec["error_handler_config"] = self.error_handler_config.to_json()
750
775
  if self.prompt_examples:
751
776
  model_spec["prompt_examples"] = []
752
777
  for example in self.prompt_examples:
@@ -170,19 +170,11 @@ async def import_flow_model(model):
170
170
 
171
171
  return tool_id
172
172
 
173
- def import_flow_support_tools(model):
174
-
175
- if not is_local_dev():
176
- # we can't import support tools into non-local environments yet
177
- return []
178
-
179
-
173
+ def import_flow_support_tools(model):
180
174
  schedulable = False
181
175
  if "schedulable" in model["spec"]:
182
176
  schedulable = model["spec"]["schedulable"]
183
177
 
184
- client = instantiate_client(TempusClient)
185
-
186
178
  logger.info(f"Import 'get_flow_status' tool spec...")
187
179
  tools = [create_flow_status_tool("i__get_flow_status_intrinsic_tool__")]
188
180