ibm-watsonx-orchestrate 1.7.0b1__py3-none-any.whl → 1.8.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. ibm_watsonx_orchestrate/__init__.py +1 -2
  2. ibm_watsonx_orchestrate/agent_builder/agents/types.py +29 -1
  3. ibm_watsonx_orchestrate/agent_builder/connections/types.py +14 -2
  4. ibm_watsonx_orchestrate/cli/commands/channels/types.py +15 -2
  5. ibm_watsonx_orchestrate/cli/commands/channels/webchat/channels_webchat_controller.py +34 -23
  6. ibm_watsonx_orchestrate/cli/commands/connections/connections_command.py +14 -6
  7. ibm_watsonx_orchestrate/cli/commands/connections/connections_controller.py +6 -8
  8. ibm_watsonx_orchestrate/cli/commands/copilot/copilot_command.py +65 -0
  9. ibm_watsonx_orchestrate/cli/commands/copilot/copilot_controller.py +368 -0
  10. ibm_watsonx_orchestrate/cli/commands/copilot/copilot_server_controller.py +170 -0
  11. ibm_watsonx_orchestrate/cli/commands/environment/types.py +1 -1
  12. ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_command.py +16 -6
  13. ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_controller.py +20 -2
  14. ibm_watsonx_orchestrate/cli/commands/knowledge_bases/knowledge_bases_controller.py +10 -8
  15. ibm_watsonx_orchestrate/cli/commands/server/server_command.py +3 -9
  16. ibm_watsonx_orchestrate/cli/main.py +3 -0
  17. ibm_watsonx_orchestrate/client/base_api_client.py +12 -0
  18. ibm_watsonx_orchestrate/client/copilot/cpe/copilot_cpe_client.py +67 -0
  19. ibm_watsonx_orchestrate/client/utils.py +49 -9
  20. ibm_watsonx_orchestrate/docker/compose-lite.yml +19 -2
  21. ibm_watsonx_orchestrate/docker/default.env +10 -6
  22. ibm_watsonx_orchestrate/flow_builder/flows/__init__.py +8 -5
  23. ibm_watsonx_orchestrate/flow_builder/flows/flow.py +47 -7
  24. ibm_watsonx_orchestrate/flow_builder/node.py +7 -1
  25. ibm_watsonx_orchestrate/flow_builder/types.py +168 -65
  26. {ibm_watsonx_orchestrate-1.7.0b1.dist-info → ibm_watsonx_orchestrate-1.8.0b1.dist-info}/METADATA +2 -2
  27. {ibm_watsonx_orchestrate-1.7.0b1.dist-info → ibm_watsonx_orchestrate-1.8.0b1.dist-info}/RECORD +30 -26
  28. {ibm_watsonx_orchestrate-1.7.0b1.dist-info → ibm_watsonx_orchestrate-1.8.0b1.dist-info}/WHEEL +0 -0
  29. {ibm_watsonx_orchestrate-1.7.0b1.dist-info → ibm_watsonx_orchestrate-1.8.0b1.dist-info}/entry_points.txt +0 -0
  30. {ibm_watsonx_orchestrate-1.7.0b1.dist-info → ibm_watsonx_orchestrate-1.8.0b1.dist-info}/licenses/LICENSE +0 -0
ibm_watsonx_orchestrate/cli/commands/copilot/copilot_controller.py (new file)
@@ -0,0 +1,368 @@
+ import logging
+ import os
+ import sys
+ import csv
+
+ import rich
+ from rich.console import Console
+ from rich.prompt import Prompt
+ from rich.progress import Progress, SpinnerColumn, TextColumn
+ from requests import ConnectionError
+ from typing import List
+ from ibm_watsonx_orchestrate.client.base_api_client import ClientAPIException
+ from ibm_watsonx_orchestrate.agent_builder.tools import ToolSpec, ToolPermission, ToolRequestBody, ToolResponseBody
+ from ibm_watsonx_orchestrate.cli.commands.agents.agents_controller import AgentsController, AgentKind, SpecVersion
+ from ibm_watsonx_orchestrate.agent_builder.agents.types import DEFAULT_LLM, BaseAgentSpec
+ from ibm_watsonx_orchestrate.client.agents.agent_client import AgentClient
+ from ibm_watsonx_orchestrate.client.tools.tool_client import ToolClient
+ from ibm_watsonx_orchestrate.client.copilot.cpe.copilot_cpe_client import CPEClient
+ from ibm_watsonx_orchestrate.client.utils import instantiate_client
+ from ibm_watsonx_orchestrate.utils.exceptions import BadRequest
+
+ logger = logging.getLogger(__name__)
+
+
+ def _validate_output_file(output_file: str, dry_run_flag: bool) -> None:
+     if not output_file and not dry_run_flag:
+         logger.error(
+             "Please provide a valid yaml output file, or use the `--dry-run` flag to output the generated agent content to the terminal")
+         sys.exit(1)
+
+     if output_file and dry_run_flag:
+         logger.error("Cannot set output file when performing a dry run")
+         sys.exit(1)
+
+     if output_file:
+         _, file_extension = os.path.splitext(output_file)
+         if file_extension not in {".yaml", ".yml", ".json"}:
+             logger.error("Output file must be of type '.yaml', '.yml' or '.json'")
+             sys.exit(1)
+
+
+ def _get_progress_spinner() -> Progress:
+     console = Console()
+     return Progress(
+         SpinnerColumn(spinner_name="dots"),
+         TextColumn("[progress.description]{task.description}"),
+         transient=True,
+         console=console,
+     )
+
+
+ def _get_incomplete_tool_from_name(tool_name: str) -> dict:
+     input_schema = ToolRequestBody(**{"type": "object", "properties": {}})
+     output_schema = ToolResponseBody(**{"description": "None"})
+     spec = ToolSpec(**{"name": tool_name, "description": tool_name, "permission": ToolPermission.ADMIN,
+                        "input_schema": input_schema, "output_schema": output_schema})
+     return spec.model_dump()
+
+ def _get_incomplete_agent_from_name(agent_name: str) -> dict:
+     spec = BaseAgentSpec(**{"name": agent_name, "description": agent_name, "kind": AgentKind.NATIVE})
+     return spec.model_dump()
+
+ def _get_tools_from_names(tool_names: List[str]) -> List[dict]:
+     if not len(tool_names):
+         return []
+
+     tool_client = get_tool_client()
+
+     try:
+         with _get_progress_spinner() as progress:
+             task = progress.add_task(description="Fetching tools", total=None)
+             tools = tool_client.get_drafts_by_names(tool_names)
+             found_tools = {tool.get("name") for tool in tools}
+             progress.remove_task(task)
+             progress.refresh()
+             for tool_name in tool_names:
+                 if tool_name not in found_tools:
+                     logger.warning(
+                         f"Failed to find tool named '{tool_name}'. Falling back to incomplete tool definition. Copilot performance may be affected.")
+                     tools.append(_get_incomplete_tool_from_name(tool_name))
+     except ConnectionError:
+         logger.warning(
+             f"Failed to fetch tools from server. For optimal results please start the server and import the relevant tools: {', '.join(tool_names)}.")
+         tools = []
+         for tool_name in tool_names:
+             tools.append(_get_incomplete_tool_from_name(tool_name))
+
+     return tools
+
+
+ def _get_agents_from_names(collaborators_names: List[str]) -> List[dict]:
+     if not len(collaborators_names):
+         return []
+
+     native_agents_client = get_native_client()
+
+     try:
+         with _get_progress_spinner() as progress:
+             task = progress.add_task(description="Fetching agents", total=None)
+             agents = native_agents_client.get_drafts_by_names(collaborators_names)
+             found_agents = {agent.get("name") for agent in agents}
+             progress.remove_task(task)
+             progress.refresh()
+             for collaborator_name in collaborators_names:
+                 if collaborator_name not in found_agents:
+                     logger.warning(
+                         f"Failed to find agent named '{collaborator_name}'. Falling back to incomplete agent definition. Copilot performance may be affected.")
+                     agents.append(_get_incomplete_agent_from_name(collaborator_name))
+     except ConnectionError:
+         logger.warning(
+             f"Failed to fetch agents from server. For optimal results please start the server and import the relevant agents: {', '.join(collaborators_names)}.")
+         agents = []
+         for collaborator_name in collaborators_names:
+             agents.append(_get_incomplete_agent_from_name(collaborator_name))
+
+     return agents
+
+ def get_cpe_client() -> CPEClient:
+     url = os.getenv('CPE_URL', "http://localhost:8081")
+     return instantiate_client(client=CPEClient, url=url)
+
+
+ def get_tool_client(*args, **kwargs):
+     return instantiate_client(ToolClient)
+
+
+ def get_native_client(*args, **kwargs):
+     return instantiate_client(AgentClient)
+
+
+ def gather_utterances(max: int) -> list[str]:
+     utterances = []
+     logger.info(f"Please provide {max} sample utterances you expect your agent to handle (enter 'q' to finish early):")
+
+     count = 0
+
+     while count < max:
+         utterance = Prompt.ask(" [green]>[/green]").strip()
+
+         if utterance.lower() == 'q':
+             break
+
+         if utterance:
+             utterances.append(utterance)
+             count += 1
+
+     return utterances
+
+
+ def get_deployed_tools_agents():
+     all_tools = find_tools_by_description(tool_client=get_tool_client(), description=None)
+     # TODO: this brings only the "native" agents. Can external and assistant agents also be collaborators?
+     all_agents = find_agents(agent_client=get_native_client())
+     return {"tools": all_tools, "agents": all_agents}
+
+
+ def pre_cpe_step(cpe_client):
+     tools_agents = get_deployed_tools_agents()
+     user_message = ""
+     with _get_progress_spinner() as progress:
+         task = progress.add_task(description="Initializing Prompt Engine", total=None)
+         response = cpe_client.submit_pre_cpe_chat(user_message=user_message)
+         progress.remove_task(task)
+
+     res = {}
+     while True:
+         if "message" in response and response["message"]:
+             rich.print('\n🤖 Copilot: ' + response["message"])
+             user_message = Prompt.ask("\n👤 You").strip()
+             message_content = {"user_message": user_message}
+         elif "description" in response and response["description"]:
+             res["description"] = response["description"]
+             message_content = tools_agents
+         elif "metadata" in response:
+             res["agent_name"] = response["metadata"]["agent_name"]
+             res["agent_style"] = response["metadata"]["style"]
+             res["tools"] = [t for t in tools_agents["tools"] if t["name"] in response["metadata"]["tools"]]
+             res["collaborators"] = [a for a in tools_agents["agents"] if
+                                     a["name"] in response["metadata"]["collaborators"]]
+             return res
+         with _get_progress_spinner() as progress:
+             task = progress.add_task(description="Thinking...", total=None)
+             response = cpe_client.submit_pre_cpe_chat(**message_content)
+             progress.remove_task(task)
+
+
+ def find_tools_by_description(description, tool_client):
+     with _get_progress_spinner() as progress:
+         task = progress.add_task(description="Fetching Tools", total=None)
+         try:
+             tools = tool_client.get()
+             progress.remove_task(task)
+         except ConnectionError:
+             tools = []
+             progress.remove_task(task)
+             progress.refresh()
+             logger.warning("Failed to contact wxo server to fetch tools. Proceeding with empty tool list")
+     return tools
+
+ def find_agents(agent_client):
+     with _get_progress_spinner() as progress:
+         task = progress.add_task(description="Fetching Agents", total=None)
+         try:
+             agents = agent_client.get()
+             progress.remove_task(task)
+         except ConnectionError:
+             agents = []
+             progress.remove_task(task)
+             progress.refresh()
+             logger.warning("Failed to contact wxo server to fetch agents. Proceeding with empty agent list")
+     return agents
+
+
+ def gather_examples(samples_file=None):
+     if samples_file:
+         if samples_file.endswith('.txt'):
+             with open(samples_file) as f:
+                 examples = f.read().split('\n')
+         elif samples_file.endswith('.csv'):
+             with open(samples_file, 'r', encoding='utf-8') as f:
+                 reader = csv.DictReader(f)
+                 if 'utterance' not in reader.fieldnames:
+                     raise BadRequest("CSV must have a column named 'utterance'")
+                 examples = [row['utterance'].strip() for row in reader if row['utterance'].strip()]
+         else:
+             raise BadRequest(f'Unsupported samples file format: {os.path.basename(samples_file)}')
+     else:
+         examples = gather_utterances(3)
+
+     console = Console()
+     logger.info("You provided the following samples:")
+     for i, utterance in enumerate(examples, 1):
+         console.print(f"  {i}. {utterance}")
+
+     return examples
+
+
+ def talk_to_cpe(cpe_client, samples_file=None, context_data=None):
+     context_data = context_data or {}
+     examples = gather_examples(samples_file)
+     # upload or gather input examples
+     context_data['examples'] = examples
+     response = None
+     with _get_progress_spinner() as progress:
+         task = progress.add_task(description="Thinking...", total=None)
+         response = cpe_client.init_with_context(context_data=context_data)
+         progress.remove_task(task)
+     accepted_prompt = None
+     while accepted_prompt is None:
+         resp = response.get('response')[0]
+         accepted_prompt = resp.get("final_zsh_prompt", None)
+         if not accepted_prompt:
+             cpe_message = resp.get("message", "")
+             rich.print('\n🤖 Copilot: ' + cpe_message)
+             message = Prompt.ask("\n👤 You").strip()
+             with _get_progress_spinner() as progress:
+                 task = progress.add_task(description="Thinking...", total=None)
+                 response = cpe_client.invoke(prompt=message)
+                 progress.remove_task(task)
+
+     return accepted_prompt
+
+
+ def prompt_tune(agent_spec: str, output_file: str | None, samples_file: str | None, dry_run_flag: bool) -> None:
+     agent = AgentsController.import_agent(file=agent_spec, app_id=None)[0]
+     agent_kind = agent.kind
+
+     if agent_kind != AgentKind.NATIVE:
+         logger.error(
+             f"Only native agents are supported for prompt tuning. Provided agent spec is of kind '{agent_kind}'")
+         sys.exit(1)
+
+     if not output_file and not dry_run_flag:
+         output_file = agent_spec
+
+     _validate_output_file(output_file, dry_run_flag)
+
+     client = get_cpe_client()
+
+     instr = agent.instructions
+
+     tools = _get_tools_from_names(agent.tools)
+
+     collaborators = _get_agents_from_names(agent.collaborators)
+     try:
+         new_prompt = talk_to_cpe(cpe_client=client, samples_file=samples_file,
+                                  context_data={"initial_instruction": instr, 'tools': tools, 'description': agent.description,
+                                                "collaborators": collaborators})
+     except ConnectionError:
+         logger.error(
+             "Failed to connect to Copilot server. Please ensure Copilot is running via `orchestrate copilot start`")
+         sys.exit(1)
+     except ClientAPIException:
+         logger.error("An unexpected server error has occurred within the Copilot server. Please check the logs via `orchestrate server logs`")
+         sys.exit(1)
+
+     if new_prompt:
+         logger.info(f"The new instruction is: {new_prompt}")
+         agent.instructions = new_prompt
+
+     if dry_run_flag:
+         rich.print(agent.model_dump(exclude_none=True))
+     else:
+         if os.path.dirname(output_file):
+             os.makedirs(os.path.dirname(output_file), exist_ok=True)
+         AgentsController.persist_record(agent, output_file=output_file)
+
+
+ def create_agent(output_file: str, llm: str, samples_file: str | None, dry_run_flag: bool = False) -> None:
+     _validate_output_file(output_file, dry_run_flag)
+     # 1. prepare the clients
+     cpe_client = get_cpe_client()
+
+     # 2. Pre-CPE stage:
+     try:
+         res = pre_cpe_step(cpe_client)
+     except ConnectionError:
+         logger.error(
+             "Failed to connect to Copilot server. Please ensure Copilot is running via `orchestrate copilot start`")
+         sys.exit(1)
+     except ClientAPIException:
+         logger.error("An unexpected server error has occurred within the Copilot server. Please check the logs via `orchestrate server logs`")
+         sys.exit(1)
+
+     tools = res["tools"]
+     collaborators = res["collaborators"]
+     description = res["description"]
+     agent_name = res["agent_name"]
+     agent_style = res["agent_style"]
+
+     # 3. discuss the instructions
+     instructions = talk_to_cpe(cpe_client, samples_file, {'description': description, 'tools': tools, 'collaborators': collaborators})
+
+     # 4. create and save the agent
+     llm = llm if llm else DEFAULT_LLM
+     params = {
+         'style': agent_style,
+         'tools': [t['name'] for t in tools],
+         'llm': llm,
+         'collaborators': [c['name'] for c in collaborators]
+     }
+     agent = AgentsController.generate_agent_spec(agent_name, AgentKind.NATIVE, description, **params)
+     agent.instructions = instructions
+     agent.spec_version = SpecVersion.V1
+
+     if dry_run_flag:
+         rich.print(agent.model_dump(exclude_none=True))
+         return
+
+     if os.path.dirname(output_file):
+         os.makedirs(os.path.dirname(output_file), exist_ok=True)
+     AgentsController.persist_record(agent, output_file=output_file)
+
+     message_lines = [
+         "Your agent building session finished successfully!",
+         "Agent YAML saved in file:",
+         f"{os.path.abspath(output_file)}"
+     ]
+
+     # Determine the width of the frame
+     max_length = max(len(line) for line in message_lines)
+     frame_width = max_length + 4  # Padding for aesthetics
+
+     # Print the framed message
+     rich.print("╔" + "═" * frame_width + "╗")
+     for line in message_lines:
+         rich.print("║  " + line.ljust(max_length) + "  ║")
+     rich.print("╚" + "═" * frame_width + "╝")
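The prompt_tune and create_agent entry points above are presumably what the new copilot_command.py CLI wiring calls into; a minimal sketch of calling the controller directly (the spec path is a placeholder, and a running Copilot/CPE service is assumed):

    from ibm_watsonx_orchestrate.cli.commands.copilot.copilot_controller import prompt_tune

    # Assumes the Copilot (CPE) service is reachable on localhost:8081, e.g. after `orchestrate copilot start`.
    # With dry_run_flag=True the tuned agent spec is printed to the terminal instead of written to disk.
    prompt_tune(agent_spec="my_agent.yaml", output_file=None, samples_file=None, dry_run_flag=True)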
ibm_watsonx_orchestrate/cli/commands/copilot/copilot_server_controller.py (new file)
@@ -0,0 +1,170 @@
+ import logging
+ import sys
+ from pathlib import Path
+ import subprocess
+ import time
+ import requests
+ from urllib.parse import urlparse
+ from ibm_watsonx_orchestrate.cli.commands.server.server_command import (
+     get_compose_file,
+     ensure_docker_compose_installed,
+     _prepare_clean_env,
+     ensure_docker_installed,
+     read_env_file,
+     get_default_env_file,
+     get_persisted_user_env,
+     get_dev_edition_source,
+     get_default_registry_env_vars_by_dev_edition_source,
+     docker_login_by_dev_edition_source,
+     write_merged_env_file,
+     apply_server_env_dict_defaults
+ )
+
+ logger = logging.getLogger(__name__)
+
+ def wait_for_wxo_cpe_health_check(timeout_seconds=45, interval_seconds=2):
+     url = "http://localhost:8081/version"
+     logger.info("Waiting for Copilot component to be initialized...")
+     start_time = time.time()
+     while time.time() - start_time <= timeout_seconds:
+         try:
+             response = requests.get(url)
+             if 200 <= response.status_code < 300:
+                 return True
+             else:
+                 pass
+         except requests.RequestException as e:
+             pass
+
+         time.sleep(interval_seconds)
+     return False
+
+ def _trim_authorization_urls(env_dict: dict) -> dict:
+     auth_url_key = "AUTHORIZATION_URL"
+     env_dict_copy = env_dict.copy()
+
+     auth_url = env_dict_copy.get(auth_url_key)
+     if not auth_url:
+         return env_dict_copy
+
+
+     parsed_url = urlparse(auth_url)
+     new_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
+     env_dict_copy[auth_url_key] = new_url
+
+     return env_dict_copy
+
+
+
+
+ def run_compose_lite_cpe(user_env_file: Path) -> bool:
+     compose_path = get_compose_file()
+     compose_command = ensure_docker_compose_installed()
+     _prepare_clean_env(user_env_file)
+     ensure_docker_installed()
+
+     default_env = read_env_file(get_default_env_file())
+     user_env = read_env_file(user_env_file) if user_env_file else {}
+     if not user_env:
+         user_env = get_persisted_user_env() or {}
+
+     dev_edition_source = get_dev_edition_source(user_env)
+     default_registry_vars = get_default_registry_env_vars_by_dev_edition_source(default_env, user_env, source=dev_edition_source)
+
+     # Update the default environment with the default registry variables only if they are not already set
+     for key in default_registry_vars:
+         if key not in default_env or not default_env[key]:
+             default_env[key] = default_registry_vars[key]
+
+     # Merge the default environment with the user environment
+     merged_env_dict = {
+         **default_env,
+         **user_env,
+     }
+
+     merged_env_dict = apply_server_env_dict_defaults(merged_env_dict)
+     merged_env_dict = _trim_authorization_urls(merged_env_dict)
+
+     try:
+         docker_login_by_dev_edition_source(merged_env_dict, dev_edition_source)
+     except ValueError as ignored:
+         # do nothing, as the docker login here is not mandatory
+         pass
+
+     final_env_file = write_merged_env_file(merged_env_dict)
+
+     command = compose_command + [
+         "-f", str(compose_path),
+         "--env-file", str(final_env_file),
+         "up",
+         "cpe",
+         "-d",
+         "--remove-orphans"
+     ]
+
+     logger.info("Starting docker-compose Copilot service...")
+     result = subprocess.run(command, capture_output=False)
+
+     if result.returncode == 0:
+         logger.info("Copilot Service started successfully.")
+         # Remove the temp file if successful
+         if final_env_file.exists():
+             final_env_file.unlink()
+     else:
+         error_message = result.stderr.decode('utf-8') if result.stderr else "Error occurred."
+         logger.error(
+             f"Error running docker-compose (temporary env file left at {final_env_file}):\n{error_message}"
+         )
+         return False
+
+     is_successful_cpe_healthcheck = wait_for_wxo_cpe_health_check()
+     if not is_successful_cpe_healthcheck:
+         logger.error("The Copilot service did not initialize within the expected time. Check the logs for any errors.")
+
+     return True
+
+ def run_compose_lite_cpe_down(is_reset: bool = False) -> None:
+     compose_path = get_compose_file()
+     compose_command = ensure_docker_compose_installed()
+     ensure_docker_installed()
+
+     default_env = read_env_file(get_default_env_file())
+     final_env_file = write_merged_env_file(default_env)
+
+     command = compose_command + [
+         "-f", str(compose_path),
+         "--env-file", final_env_file,
+         "down",
+         "cpe"
+     ]
+
+     if is_reset:
+         command.append("--volumes")
+         logger.info("Stopping docker-compose Copilot service and resetting volumes...")
+     else:
+         logger.info("Stopping docker-compose Copilot service...")
+
+     result = subprocess.run(command, capture_output=False)
+
+     if result.returncode == 0:
+         logger.info("Copilot service stopped successfully.")
+         # Remove the temp file if successful
+         if final_env_file.exists():
+             final_env_file.unlink()
+     else:
+         error_message = result.stderr.decode('utf-8') if result.stderr else "Error occurred."
+         logger.error(
+             f"Error running docker-compose (temporary env file left at {final_env_file}):\n{error_message}"
+         )
+         sys.exit(1)
+
+ def start_server(user_env_file_path: Path) -> None:
+     is_server_started = run_compose_lite_cpe(user_env_file=user_env_file_path)
+
+     if is_server_started:
+         logger.info("Copilot service successfully started")
+     else:
+         logger.error("Unable to start orchestrate Copilot service. Please check error messages and logs")
+
+ def stop_server() -> None:
+     run_compose_lite_cpe_down()
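The controller above polls the CPE container's /version endpoint to decide readiness; the same check can be run by hand (URL taken from wait_for_wxo_cpe_health_check above):

    import requests

    # A 2xx status means the Copilot (CPE) container is up; otherwise keep waiting or inspect `orchestrate server logs`.
    print(requests.get("http://localhost:8081/version").status_code)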
ibm_watsonx_orchestrate/cli/commands/environment/types.py
@@ -9,4 +9,4 @@ class EnvironmentAuthType(str, Enum):
      CPD = 'cpd'

      def __str__(self):
-         return self.value
+         return self.value
ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_command.py
@@ -25,14 +25,20 @@ def read_env_file(env_path: Path|str) -> dict:
      return dotenv_values(str(env_path))

  def validate_watsonx_credentials(user_env_file: str) -> bool:
-     required_keys = ["WATSONX_SPACE_ID", "WATSONX_APIKEY"]
+     required_sets = [
+         ["WATSONX_SPACE_ID", "WATSONX_APIKEY"],
+         ["WO_INSTANCE", "WO_API_KEY"]
+     ]

-     if all(key in os.environ for key in required_keys):
+     def has_valid_keys(env: dict) -> bool:
+         return any(all(key in env for key in key_set) for key_set in required_sets)
+
+     if has_valid_keys(os.environ):
          logger.info("WatsonX credentials validated successfully.")
          return

      if user_env_file is None:
-         logger.error("WatsonX credentials are not set. Please set WATSONX_SPACE_ID and WATSONX_APIKEY in your system environment variables or include them in your enviroment file and pass it with --env-file option.")
+         logger.error("WatsonX credentials are not set. Please set either WATSONX_SPACE_ID and WATSONX_APIKEY or WO_INSTANCE and WO_API_KEY in your system environment variables or include them in your environment file and pass it with --env-file option.")
          sys.exit(1)

      if not Path(user_env_file).exists():
@@ -41,11 +47,15 @@ def validate_watsonx_credentials(user_env_file: str) -> bool:

      user_env = read_env_file(user_env_file)

-     if not all(key in user_env for key in required_keys):
-         logger.error("Error: The environment file does not contain the required keys: WATSONX_SPACE_ID and WATSONX_APIKEY.")
+     if not has_valid_keys(user_env):
+         logger.error("Error: The environment file does not contain the required keys: either WATSONX_SPACE_ID and WATSONX_APIKEY or WO_INSTANCE and WO_API_KEY.")
          sys.exit(1)

-     os.environ.update({key: user_env[key] for key in required_keys})
+     # Update os.environ with whichever set is present
+     for key_set in required_sets:
+         if all(key in user_env for key in key_set):
+             os.environ.update({key: user_env[key] for key in key_set})
+             break
      logger.info("WatsonX credentials validated successfully.")

  def read_csv(data_path: str, delimiter="\t"):
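With this change validate_watsonx_credentials() accepts either credential pair; a minimal sketch of an environment file passed via the --env-file option (values are placeholders):

    # Option A: watsonx.ai credentials
    WATSONX_SPACE_ID=<your-space-id>
    WATSONX_APIKEY=<your-api-key>

    # Option B: watsonx Orchestrate instance credentials
    WO_INSTANCE=<your-instance-url>
    WO_API_KEY=<your-wo-api-key>

Whichever pair is present is copied into os.environ before the credentials are reported as valid.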
ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_controller.py
@@ -3,12 +3,12 @@ import os.path
  from typing import List, Dict, Optional, Tuple
  import csv
  from pathlib import Path
- import rich
+ import sys
  from wxo_agentic_evaluation import main as evaluate
  from wxo_agentic_evaluation.tool_planner import build_snapshot
  from wxo_agentic_evaluation.analyze_run import analyze
  from wxo_agentic_evaluation.batch_annotate import generate_test_cases_from_stories
- from wxo_agentic_evaluation.arg_configs import TestConfig, AuthConfig, LLMUserConfig, ChatRecordingConfig, AnalyzeConfig
+ from wxo_agentic_evaluation.arg_configs import TestConfig, AuthConfig, LLMUserConfig, ChatRecordingConfig, AnalyzeConfig, ProviderConfig
  from wxo_agentic_evaluation.record_chat import record_chats
  from wxo_agentic_evaluation.external_agent.external_validate import ExternalAgentValidation
  from wxo_agentic_evaluation.external_agent.performance_test import ExternalAgentPerformanceTest
@@ -41,12 +41,26 @@ class EvaluationsController:
      def evaluate(self, config_file: Optional[str] = None, test_paths: Optional[str] = None, output_dir: Optional[str] = None) -> None:
          url, tenant_name, token = self._get_env_config()

+         if "WATSONX_SPACE_ID" in os.environ and "WATSONX_APIKEY" in os.environ:
+             provider = "watsonx"
+         elif "WO_INSTANCE" in os.environ and "WO_API_KEY" in os.environ:
+             provider = "model_proxy"
+         else:
+             logger.error(
+                 "No provider found. Please either provide a config_file or set either WATSONX_SPACE_ID and WATSONX_APIKEY or WO_INSTANCE and WO_API_KEY in your system environment variables."
+             )
+             sys.exit(1)
+
          config_data = {
              "wxo_lite_version": __version__,
              "auth_config": AuthConfig(
                  url=url,
                  tenant_name=tenant_name,
                  token=token
+             ),
+             "provider_config": ProviderConfig(
+                 provider=provider,
+                 model_id="meta-llama/llama-3-405b-instruct",
              )
          }

@@ -62,6 +76,10 @@ class EvaluationsController:
          if "llm_user_config" in file_config:
              llm_config_data = file_config.pop("llm_user_config")
              config_data["llm_user_config"] = LLMUserConfig(**llm_config_data)
+
+         if "provider_config" in file_config:
+             provider_config_data = file_config.pop("provider_config")
+             config_data["provider_config"] = ProviderConfig(**provider_config_data)

          config_data.update(file_config)

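The provider can also be pinned explicitly through the evaluation config file rather than environment variables; a hypothetical provider_config entry (field names mirror the ProviderConfig call above; a YAML config file is assumed):

    provider_config:
      provider: watsonx        # or "model_proxy" when WO_INSTANCE / WO_API_KEY are used
      model_id: meta-llama/llama-3-405b-instruct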
ibm_watsonx_orchestrate/cli/commands/knowledge_bases/knowledge_bases_controller.py
@@ -71,6 +71,7 @@ class KnowledgeBaseController:
          client = self.get_client()

          knowledge_bases = parse_file(file=file)
+
          existing_knowledge_bases = client.get_by_names([kb.name for kb in knowledge_bases])

          for kb in knowledge_bases:
@@ -137,23 +138,24 @@ class KnowledgeBaseController:

      def update_knowledge_base(
          self, knowledge_base_id: str, kb: KnowledgeBase, file_dir: str
-     ) -> None:
-         filtered_files = []
-
+     ) -> None:
          if kb.documents:
              status = self.get_client().status(knowledge_base_id)
              existing_docs = [doc.get("metadata", {}).get("original_file_name", "") for doc in status.get("documents", [])]

+             removed_docs = existing_docs[:]
              for filepath in kb.documents:
                  filename = get_file_name(filepath)

                  if filename in existing_docs:
-                     logger.warning(f'Document \"{filename}\" already exists in knowledge base, skipping.')
-                 else:
-                     filtered_files.append(filepath)
+                     logger.warning(f'Document \"{filename}\" already exists in knowledge base. Updating...')
+                     removed_docs.remove(filename)
+
+             for filename in removed_docs:
+                 logger.warning(f'Document \"{filename}\" removed from knowledge base.')
+

-             if filtered_files:
-                 files = [('files', (get_file_name(file_path), open(get_relative_file_path(file_path, file_dir), 'rb'))) for file_path in filtered_files]
+             files = [('files', (get_file_name(file_path), open(get_relative_file_path(file_path, file_dir), 'rb'))) for file_path in kb.documents]

              kb.prioritize_built_in_index = True
              payload = kb.model_dump(exclude_none=True);