ibm-watsonx-orchestrate 1.13.0b0__py3-none-any.whl → 1.13.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ibm_watsonx_orchestrate/__init__.py +1 -1
- ibm_watsonx_orchestrate/agent_builder/knowledge_bases/types.py +2 -0
- ibm_watsonx_orchestrate/cli/commands/agents/agents_controller.py +4 -2
- ibm_watsonx_orchestrate/cli/commands/copilot/copilot_command.py +13 -2
- ibm_watsonx_orchestrate/cli/commands/copilot/copilot_controller.py +29 -20
- ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_command.py +30 -3
- ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_environment_manager.py +158 -0
- ibm_watsonx_orchestrate/cli/commands/knowledge_bases/knowledge_bases_command.py +26 -0
- ibm_watsonx_orchestrate/cli/commands/knowledge_bases/knowledge_bases_controller.py +112 -25
- ibm_watsonx_orchestrate/cli/commands/server/server_command.py +31 -1
- ibm_watsonx_orchestrate/client/agents/agent_client.py +1 -1
- ibm_watsonx_orchestrate/client/copilot/cpe/copilot_cpe_client.py +24 -13
- ibm_watsonx_orchestrate/client/tools/tempus_client.py +4 -2
- ibm_watsonx_orchestrate/docker/compose-lite.yml +1 -0
- ibm_watsonx_orchestrate/docker/default.env +8 -9
- ibm_watsonx_orchestrate/flow_builder/flows/flow.py +8 -4
- ibm_watsonx_orchestrate/flow_builder/types.py +4 -0
- ibm_watsonx_orchestrate/flow_builder/utils.py +1 -9
- {ibm_watsonx_orchestrate-1.13.0b0.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/METADATA +2 -2
- {ibm_watsonx_orchestrate-1.13.0b0.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/RECORD +23 -22
- {ibm_watsonx_orchestrate-1.13.0b0.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/WHEEL +0 -0
- {ibm_watsonx_orchestrate-1.13.0b0.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/entry_points.txt +0 -0
- {ibm_watsonx_orchestrate-1.13.0b0.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/licenses/LICENSE +0 -0
ibm_watsonx_orchestrate/agent_builder/knowledge_bases/types.py

@@ -11,6 +11,7 @@ class SpecVersion(str, Enum):
 
 class KnowledgeBaseKind(str, Enum):
     KNOWLEDGE_BASE = "knowledge_base"
+
 class RetrievalConfidenceThreshold(str, Enum):
     Off = "Off"
     Lowest = "Lowest"
@@ -240,6 +241,7 @@ class AstraDBConnection(BaseModel):
 
 class IndexConnection(BaseModel):
     connection_id: Optional[str] = None
+    app_id: Optional[str] = None
     milvus: Optional[MilvusConnection] = None
     elastic_search: Optional[ElasticSearchConnection] = None
    custom_search: Optional[CustomSearchConnection] = None
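The only functional change here is the new optional `app_id` field on `IndexConnection`. A minimal sketch of what that enables, using a reduced stand-in for the model (only the two identifier fields from the diff are kept; the real class also carries the Milvus/Elasticsearch/custom-search settings):

```python
from typing import Optional
from pydantic import BaseModel

class IndexConnection(BaseModel):
    # Reduced stand-in for the model in the diff, for illustration only.
    connection_id: Optional[str] = None
    app_id: Optional[str] = None

# An exported knowledge base spec can now reference its connection by app_id
# and leave connection_id unset; import resolves the app_id back to a
# concrete connection in the target environment.
index = IndexConnection(app_id="my-search-connection")
print(index.model_dump(exclude_none=True))  # {'app_id': 'my-search-connection'}
```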

ibm_watsonx_orchestrate/cli/commands/agents/agents_controller.py

@@ -16,7 +16,7 @@ from pydantic import BaseModel
 from ibm_watsonx_orchestrate.agent_builder.agents.types import AgentStyle
 from ibm_watsonx_orchestrate.agent_builder.tools.types import ToolSpec
 from ibm_watsonx_orchestrate.cli.commands.tools.tools_controller import import_python_tool, ToolsController
-from ibm_watsonx_orchestrate.cli.commands.knowledge_bases.knowledge_bases_controller import import_python_knowledge_base
+from ibm_watsonx_orchestrate.cli.commands.knowledge_bases.knowledge_bases_controller import import_python_knowledge_base, KnowledgeBaseController
 from ibm_watsonx_orchestrate.cli.commands.models.models_controller import import_python_model
 from ibm_watsonx_orchestrate.cli.common import ListFormats, rich_table_to_markdown
 
@@ -1362,8 +1362,10 @@ class AgentsController:
                 ToolSpec.model_validate(current_spec).model_dump_json(exclude_unset=True,indent=2)
             )
 
+        knowledge_base_controller = KnowledgeBaseController()
         for kb_name in agent_spec_file_content.get("knowledge_base", []):
-
+            knowledge_base_file_path = f"{output_file_name}/knowledge-bases/{kb_name}.yaml"
+            knowledge_base_controller.knowledge_base_export(name=kb_name, output_path=knowledge_base_file_path, zip_file_out=zip_file_out)
 
         if kind == AgentKind.NATIVE:
             for collaborator_id in agent.collaborators:

ibm_watsonx_orchestrate/cli/commands/copilot/copilot_command.py

@@ -43,6 +43,10 @@ def prompt_tume_command(
         str,
         typer.Option("--llm", help="Select the agent LLM"),
     ] = None,
+    chat_llm: Annotated[
+        str,
+        typer.Option("--chat-llm", help="Select the underlying model for the copilot. Currently only llama-3-3-70b-instruct is supported."),
+    ] = None,
     samples: Annotated[
         str,
         typer.Option("--samples", "-s", help="Path to utterances sample file (txt file where each line is a utterance, or csv file with a single \"input\" column)"),
@@ -51,6 +55,7 @@ def prompt_tume_command(
     if file is None:
         # create agent yaml from scratch
         create_agent(
+            chat_llm=chat_llm,
             llm=llm,
             output_file=output_file,
             samples_file=samples,
@@ -59,6 +64,7 @@ def prompt_tume_command(
     else:
         # improve existing agent instruction
         prompt_tune(
+            chat_llm=chat_llm,
             agent_spec=file,
             samples_file=samples,
             output_file=output_file,
@@ -77,12 +83,17 @@ def agent_refine(
     ] = None,
     use_last_chat: Annotated[
         bool,
-        typer.Option("--
+        typer.Option("--use-last-chat", "-l", help="Tuning by using the last conversation with the agent instead of prompting the user to choose chats"),
     ] = False,
     dry_run_flag: Annotated[
         bool,
         typer.Option("--dry-run",
                      help="Dry run will prevent the tuned content being saved and output the results to console"),
     ] = False,
+    chat_llm: Annotated[
+        str,
+        typer.Option("--chat-llm", help="Select the underlying model for the copilot. Currently only llama-3-3-70b-instruct is supported."),
+    ] = None,
+
 ):
-    refine_agent_with_trajectories(agent_name, output_file, use_last_chat, dry_run_flag)
+    refine_agent_with_trajectories(agent_name, chat_llm=chat_llm, output_file=output_file, use_last_chat=use_last_chat, dry_run_flag=dry_run_flag)
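Both copilot commands now accept a `--chat-llm` option and thread it through to the controller functions. A hedged sketch of the programmatic equivalent, using the controller signatures visible in this diff; the file paths are placeholders and a running copilot server is assumed:

```python
from ibm_watsonx_orchestrate.cli.commands.copilot.copilot_controller import create_agent, prompt_tune

# Create a new agent YAML from scratch, pinning the copilot model explicitly.
create_agent(
    output_file="agents/support_agent.yaml",       # placeholder path
    llm="watsonx/meta-llama/llama-3-3-70b-instruct",
    chat_llm="llama-3-3-70b-instruct",             # only this model is accepted by the validator
    samples_file=None,
)

# Or refine an existing agent spec with the same copilot model.
prompt_tune(
    agent_spec="agents/support_agent.yaml",
    chat_llm="llama-3-3-70b-instruct",
    output_file="agents/support_agent_tuned.yaml",
    samples_file=None,
    dry_run_flag=True,
)
```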

ibm_watsonx_orchestrate/cli/commands/copilot/copilot_controller.py

@@ -3,6 +3,7 @@ import os
 import sys
 import csv
 import difflib
+import re
 from datetime import datetime
 
 import rich
@@ -218,12 +219,12 @@ def get_deployed_tools_agents_and_knowledge_bases():
     return {"tools": all_tools, "collaborators": all_agents, "knowledge_bases": all_knowledge_bases}
 
 
-def pre_cpe_step(cpe_client):
+def pre_cpe_step(cpe_client, chat_llm):
     tools_agents_and_knowledge_bases = get_deployed_tools_agents_and_knowledge_bases()
     user_message = ""
     with _get_progress_spinner() as progress:
         task = progress.add_task(description="Initializing Prompt Engine", total=None)
-        response = cpe_client.submit_pre_cpe_chat(user_message=user_message)
+        response = cpe_client.submit_pre_cpe_chat(chat_llm=chat_llm, user_message=user_message)
         progress.remove_task(task)
 
     res = {}
@@ -258,7 +259,7 @@ def pre_cpe_step(cpe_client):
             return res
         with _get_progress_spinner() as progress:
             task = progress.add_task(description="Thinking...", total=None)
-            response = cpe_client.submit_pre_cpe_chat(
+            response = cpe_client.submit_pre_cpe_chat(chat_llm=chat_llm,**message_content)
             progress.remove_task(task)
 
 
@@ -314,7 +315,7 @@ def gather_examples(samples_file=None):
     return examples
 
 
-def talk_to_cpe(cpe_client, samples_file=None, context_data=None):
+def talk_to_cpe(cpe_client, chat_llm, samples_file=None, context_data=None):
     context_data = context_data or {}
     examples = gather_examples(samples_file)
     # upload or gather input examples
@@ -322,7 +323,7 @@ def talk_to_cpe(cpe_client, samples_file=None, context_data=None):
     response = None
     with _get_progress_spinner() as progress:
         task = progress.add_task(description="Thinking...", total=None)
-        response = cpe_client.init_with_context(context_data=context_data)
+        response = cpe_client.init_with_context(chat_llm=chat_llm, context_data=context_data)
         progress.remove_task(task)
     accepted_prompt = None
     while accepted_prompt is None:
@@ -334,13 +335,13 @@ def talk_to_cpe(cpe_client, samples_file=None, context_data=None):
         message = Prompt.ask("\n👤 You").strip()
         with _get_progress_spinner() as progress:
             task = progress.add_task(description="Thinking...", total=None)
-            response = cpe_client.invoke(prompt=message)
+            response = cpe_client.invoke(chat_llm=chat_llm, prompt=message)
             progress.remove_task(task)
 
     return accepted_prompt
 
 
-def prompt_tune(agent_spec: str, output_file: str | None, samples_file: str | None, dry_run_flag: bool) -> None:
+def prompt_tune(agent_spec: str, chat_llm: str | None, output_file: str | None, samples_file: str | None, dry_run_flag: bool) -> None:
     agent = AgentsController.import_agent(file=agent_spec, app_id=None)[0]
     agent_kind = agent.kind
 
@@ -353,6 +354,7 @@ def prompt_tune(agent_spec: str, output_file: str | None, samples_file: str | No
         output_file = agent_spec
 
     _validate_output_file(output_file, dry_run_flag)
+    _validate_chat_llm(chat_llm)
 
     client = get_cpe_client()
 
@@ -365,6 +367,7 @@ def prompt_tune(agent_spec: str, output_file: str | None, samples_file: str | No
     knowledge_bases = _get_knowledge_bases_from_names(agent.knowledge_base)
     try:
         new_prompt = talk_to_cpe(cpe_client=client,
+                                 chat_llm=chat_llm,
                                  samples_file=samples_file,
                                  context_data={
                                      "initial_instruction": instr,
@@ -387,21 +390,27 @@ def prompt_tune(agent_spec: str, output_file: str | None, samples_file: str | No
     agent.instructions = new_prompt
 
     if dry_run_flag:
-        rich.print(agent.model_dump(exclude_none=True))
+        rich.print(agent.model_dump(exclude_none=True, mode="json"))
     else:
         if os.path.dirname(output_file):
             os.makedirs(os.path.dirname(output_file), exist_ok=True)
         AgentsController.persist_record(agent, output_file=output_file)
 
+def _validate_chat_llm(chat_llm):
+    if chat_llm:
+        formatted_chat_llm = re.sub(r'[^a-zA-Z0-9/]', '-', chat_llm)
+        if "llama-3-3-70b-instruct" not in formatted_chat_llm:
+            raise BadRequest(f"Unsupported chat model for copilot {chat_llm}. Copilot supports only llama-3-3-70b-instruct at this point.")
 
-def create_agent(output_file: str, llm: str, samples_file: str | None, dry_run_flag: bool = False) -> None:
+def create_agent(output_file: str, llm: str, chat_llm: str | None, samples_file: str | None, dry_run_flag: bool = False) -> None:
     _validate_output_file(output_file, dry_run_flag)
+    _validate_chat_llm(chat_llm)
     # 1. prepare the clients
     cpe_client = get_cpe_client()
 
     # 2. Pre-CPE stage:
     try:
-        res = pre_cpe_step(cpe_client)
+        res = pre_cpe_step(cpe_client, chat_llm=chat_llm)
     except ConnectionError:
         logger.error(
             "Failed to connect to Copilot server. Please ensure Copilot is running via `orchestrate copilot start`")
@@ -419,7 +428,7 @@ def create_agent(output_file: str, llm: str, samples_file: str | None, dry_run_f
     agent_style = res["agent_style"]
 
     # 4. discuss the instructions
-    instructions = talk_to_cpe(cpe_client, samples_file,
+    instructions = talk_to_cpe(cpe_client, chat_llm, samples_file,
                                {'description': description, 'tools': tools, 'collaborators': collaborators,
                                 'knowledge_bases': knowledge_bases})
 
@@ -438,7 +447,7 @@ def create_agent(output_file: str, llm: str, samples_file: str | None, dry_run_f
     agent.spec_version = SpecVersion.V1
 
     if dry_run_flag:
-        rich.print(agent.model_dump(exclude_none=True))
+        rich.print(agent.model_dump(exclude_none=True, mode="json"))
         return
 
     if os.path.dirname(output_file):
@@ -493,7 +502,8 @@ def _suggest_sorted(user_input: str, options: List[str]) -> List[str]:
     return sorted(options, key=lambda x: difflib.SequenceMatcher(None, user_input, x).ratio(), reverse=True)
 
 
-def refine_agent_with_trajectories(agent_name: str,
+def refine_agent_with_trajectories(agent_name: str, chat_llm: str | None, output_file: str | None,
+                                   use_last_chat: bool=False, dry_run_flag: bool = False) -> None:
     """
     Refines an existing agent's instructions using user selected chat trajectories and saves the updated agent configuration.
 
@@ -510,6 +520,7 @@ def refine_agent_with_trajectories(agent_name: str, output_file: str | None, use
 
     Parameters:
         agent_name (str): The name of the agent to refine.
+        chat_llm (str): The name of the model used by the refiner. If None, default model (llama-3-3-70b) is used.
        output_file (str): Path to the file where the refined agent configuration will be saved.
        use_last_chat(bool): If true, optimize by using the last conversation with the agent, otherwise let the use choose
        dry_run_flag (bool): If True, prints the refined agent configuration without saving it to disk.
@@ -519,6 +530,7 @@ def refine_agent_with_trajectories(agent_name: str, output_file: str | None, use
     """
 
     _validate_output_file(output_file, dry_run_flag)
+    _validate_chat_llm(chat_llm)
     agents_controller = AgentsController()
     agents_client = get_native_client()
     threads_client = get_threads_client()
@@ -535,9 +547,6 @@ def refine_agent_with_trajectories(agent_name: str, output_file: str | None, use
                    f'Available agents:\n'
                    f'{available_sorted_str}')
 
-        rich.print(Panel(message, title="Agent Lookup", border_style="blue"))
-        return
-
     cpe_client = get_cpe_client()
     # Step 2 - retrieve chats (threads)
     try:
@@ -550,7 +559,6 @@ def refine_agent_with_trajectories(agent_name: str, output_file: str | None, use
         raise BadRequest(
             f"No chats found for agent '{agent_name}'. To use autotune, please initiate at least one conversation with the agent. You can start a chat using `orchestrate chat start`.",
         )
-        return
     last_10_threads = all_threads[:10] #TODO use batching when server allows
     last_10_chats = [_format_thread_messages(chat) for chat in
                      threads_client.get_threads_messages([thread['id'] for thread in last_10_threads])]
@@ -621,8 +629,9 @@ def refine_agent_with_trajectories(agent_name: str, output_file: str | None, use
             knowledge_bases = _get_knowledge_bases_from_names(agent.knowledge_base)
             if agent.instructions is None:
                 raise BadRequest("Agent must have instructions in order to use the autotune command. To build an instruction use `orchestrate copilot prompt-tune -f <path_to_agent_yaml> -o <path_to_new_agent_yaml>`")
-            response = cpe_client.refine_agent_with_chats(agent.instructions,
-                                                          knowledge_bases=knowledge_bases,
+            response = cpe_client.refine_agent_with_chats(instruction=agent.instructions, chat_llm=chat_llm, tools=tools,
+                                                          collaborators=collaborators, knowledge_bases=knowledge_bases,
+                                                          trajectories_with_feedback=threads_messages)
             progress.remove_task(task)
             progress.refresh()
     except ConnectionError:
@@ -638,7 +647,7 @@ def refine_agent_with_trajectories(agent_name: str, output_file: str | None, use
     agent.instructions = response['instruction']
 
     if dry_run_flag:
-        rich.print(agent.model_dump(exclude_none=True))
+        rich.print(agent.model_dump(exclude_none=True, mode="json"))
         return
 
     if os.path.dirname(output_file):
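The new `_validate_chat_llm` helper normalizes the supplied model name before checking it, so dotted or slash-qualified spellings of the supported model pass. A standalone sketch of the same check; the `BadRequest` class here is only a stand-in for the CLI's own exception type:

```python
import re

class BadRequest(Exception):
    """Stand-in for the CLI's BadRequest exception, for illustration only."""

def _validate_chat_llm(chat_llm):
    if chat_llm:
        # Replace everything except letters, digits and '/' with '-', then
        # look for the supported model name inside the normalized string.
        formatted_chat_llm = re.sub(r'[^a-zA-Z0-9/]', '-', chat_llm)
        if "llama-3-3-70b-instruct" not in formatted_chat_llm:
            raise BadRequest(f"Unsupported chat model for copilot {chat_llm}. "
                             "Copilot supports only llama-3-3-70b-instruct at this point.")

_validate_chat_llm("watsonx/meta-llama/llama-3.3-70b-instruct")  # accepted: dots normalize to dashes
_validate_chat_llm(None)                                         # accepted: falls back to the default model
try:
    _validate_chat_llm("granite-3-8b-instruct")
except BadRequest as err:
    print(err)                                                   # rejected
```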

ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_command.py

@@ -17,10 +17,13 @@ from typing_extensions import Annotated
 
 from ibm_watsonx_orchestrate import __version__
 from ibm_watsonx_orchestrate.cli.commands.evaluations.evaluations_controller import EvaluationsController, EvaluateMode
+from ibm_watsonx_orchestrate.cli.commands.evaluations.evaluations_environment_manager import run_environment_manager
 from ibm_watsonx_orchestrate.cli.commands.agents.agents_controller import AgentsController
 
 logger = logging.getLogger(__name__)
 
+HIDE_ENVIRONMENT_MGR_PANEL = os.environ.get("HIDE_ENVIRONMENT_MGR_PANEL", "true").lower() == "true"
+
 evaluation_app = typer.Typer(no_args_is_help=True)
 
 def _native_agent_template():
@@ -142,14 +145,38 @@ def evaluate(
             "--env-file", "-e",
             help="Path to a .env file that overrides default.env. Then environment variables override both."
         ),
-    ] = None
+    ] = None,
+    env_manager_path: Annotated[
+        Optional[str],
+        typer.Option(
+            "--env-manager-path",
+            help="""
+            Path to YAML configuration file containing environment settings.\n
+            See `./examples/evaluations/environment_manager` on how to create the environment manager file.
+            Note: When using this feature, you must pass the `output_dir`.
+            """,
+            rich_help_panel="Environment Manager",
+            hidden=HIDE_ENVIRONMENT_MGR_PANEL
+        )
+    ] = None,
 ):
+    validate_watsonx_credentials(user_env_file)
+
+    if env_manager_path:
+        if output_dir:
+            return run_environment_manager(
+                environment_manager_path=env_manager_path,
+                output_dir=output_dir,
+            )
+        else:
+            logger.error("Error: `--env_manager_path`, `--output_dir` must be provided to use the environment manager feature.")
+            sys.exit(1)
+
     if not config_file:
         if not test_paths or not output_dir:
             logger.error("Error: Both --test-paths and --output-dir must be provided when not using a config file")
             exit(1)
-
-    validate_watsonx_credentials(user_env_file)
+
     controller = EvaluationsController()
     controller.evaluate(config_file=config_file, test_paths=test_paths, output_dir=output_dir)
 

ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_environment_manager.py

@@ -0,0 +1,158 @@
+import logging
+import yaml
+from typing import Mapping, Any
+from enum import StrEnum
+from pathlib import Path
+
+from ibm_watsonx_orchestrate.cli.commands.agents.agents_controller import (
+    AgentsController,
+    Agent,
+    ExternalAgent,
+    AssistantAgent,
+)
+from ibm_watsonx_orchestrate.cli.commands.tools.tools_controller import (
+    ToolsController,
+    BaseTool,
+)
+from ibm_watsonx_orchestrate.cli.commands.knowledge_bases.knowledge_bases_controller import (
+    KnowledgeBaseController,
+    KnowledgeBase,
+)
+from ibm_watsonx_orchestrate.cli.commands.knowledge_bases.knowledge_bases_controller import (
+    parse_file as kb_parse_file,
+)
+from ibm_watsonx_orchestrate.cli.commands.evaluations.evaluations_controller import (
+    EvaluationsController,
+    EvaluateMode,
+)
+
+
+logger = logging.getLogger(__name__)
+
+
+class ArtifactTypes(StrEnum):
+    """The allowed artifacts in the environment manager path.
+
+    The environment manager config looks like this:
+    ```json
+    env1:
+        agent:
+            agents_path: None
+        tools:
+            tools_path: None
+            tool_kind: None
+            # any other tool flags
+        knowledge:
+            knowledge_base_path: None
+        test_config: # path to config.yaml
+        clean_up: True
+    ```
+    The allowed artifacts/keys are "agent", "tools", "knowledge"
+    """
+
+    agent = "agent"
+    tools = "tools"
+    knowledge = "knowledge"
+
+
+class TestCaseManager:
+    def __init__(
+        self,
+        env_settings: Mapping[str, Any],
+        output_dir: str,
+        mode: EvaluateMode = EvaluateMode.default,
+    ):
+        self.env_settings = env_settings
+        self.cleanup = env_settings.get("clean_up", False)
+        self.output_dir = output_dir
+        self.mode = mode
+
+        self.agent_controller = AgentsController()
+        self.knowledge_controller = KnowledgeBaseController()
+        self.tool_controller = None
+        if (tool_settings := env_settings.get(ArtifactTypes.tools)):
+            self.tool_controller = ToolsController(
+                tool_kind=tool_settings.get("kind"),
+                file=tool_settings.get("file"),
+                requirements_file=tool_settings.get("requirements_file")
+            )
+
+        self.imported_artifacts = []
+
+    def __enter__(self):
+        for artifact in [
+            ArtifactTypes.tools,
+            ArtifactTypes.knowledge,
+            ArtifactTypes.agent,
+        ]:
+            if artifact not in self.env_settings:
+                continue
+
+            artifact_settings = self.env_settings.get(artifact)
+            if artifact == ArtifactTypes.tools:
+                tools = ToolsController.import_tool(**artifact_settings)
+                # import_tool returns Iterator[BaseTool], copy the iterator into a list for preservation
+                # this is needed if user wants environment cleanup
+                tools = [tool for tool in tools]
+                self.imported_artifacts.append(tools)
+                self.tool_controller.publish_or_update_tools(tools)
+            elif artifact == ArtifactTypes.knowledge:
+                KnowledgeBaseController.import_knowledge_base(**artifact_settings)
+                kb_spec = kb_parse_file(artifact_settings.get("file"))
+                self.imported_artifacts.append(kb_spec)
+            elif artifact == ArtifactTypes.agent:
+                artifact_settings["app_id"] = artifact_settings.get("app_id", None)
+                agents = AgentsController.import_agent(**artifact_settings)
+                self.agent_controller.publish_or_update_agents(agents)
+                self.imported_artifacts.append(agents)
+
+        eval = EvaluationsController()
+        eval.evaluate(
+            test_paths=self.env_settings.get("test_paths"),
+            output_dir=self.output_dir,
+            mode=self.mode,
+        )
+
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if self.cleanup:
+            logger.info("Cleaning environment")
+            for artifact in self.imported_artifacts:
+                # artifact can be a list of agents, tools
+                for item in artifact:
+                    if isinstance(item, BaseTool):
+                        self.tool_controller.remove_tool(item.__tool_spec__.name)
+                    if isinstance(item, KnowledgeBase):
+                        self.knowledge_controller.remove_knowledge_base(
+                            item.id, item.name
+                        )
+                    if isinstance(item, (Agent, AssistantAgent, ExternalAgent)):
+                        self.agent_controller.remove_agent(item.name, item.kind)
+
+
+def run_environment_manager(
+    environment_manager_path: str,
+    mode: EvaluateMode = EvaluateMode.default,
+    output_dir: str = None,
+):
+    with open(environment_manager_path, encoding="utf-8", mode="r") as f:
+        env_settings = yaml.load(f, Loader=yaml.SafeLoader)
+
+    for env in env_settings:
+        if not env_settings.get(env).get("enabled"):
+            continue
+        results_folder = Path(output_dir) / env
+        results_folder.mkdir(parents=True, exist_ok=True)
+        logger.info(
+            "Processing environment: '%s'. Results will be saved to '%s'",
+            env,
+            results_folder,
+        )
+
+        with TestCaseManager(
+            env_settings=env_settings.get(env),
+            output_dir=str(results_folder),
+            mode=mode,
+        ):
+            logger.info("Finished evaluation for environment: '%s'", env)

ibm_watsonx_orchestrate/cli/commands/knowledge_bases/knowledge_bases_command.py

@@ -59,3 +59,29 @@ def knowledge_base_status(
 ):
     controller = KnowledgeBaseController()
     controller.knowledge_base_status(id=id, name=name)
+
+@knowledge_bases_app.command(name="export", help='Export a knowledge base spec to a yaml')
+def knowledge_base_export(
+    output_file: Annotated[
+        str,
+        typer.Option(
+            "--output",
+            "-o",
+            help="Path to a where the zip file containing the exported data should be saved",
+        ),
+    ],
+    name: Annotated[
+        str,
+        typer.Option("--name", "-n", help="The name of the knowledge base you want to export"),
+    ]=None,
+    id: Annotated[
+        str,
+        typer.Option("--id", "-i", help="The ID of the knowledge base you wish export"),
+    ]=None,
+):
+    controller = KnowledgeBaseController()
+    controller.knowledge_base_export(
+        id=id,
+        name=name,
+        output_path=output_file
+    )

ibm_watsonx_orchestrate/cli/commands/knowledge_bases/knowledge_bases_controller.py

@@ -5,8 +5,11 @@ import requests
 import logging
 import importlib
 import inspect
+import yaml
 from pathlib import Path
-from typing import List, Any
+from typing import List, Any, Optional
+from zipfile import ZipFile
+from io import BytesIO
 
 from ibm_watsonx_orchestrate.agent_builder.knowledge_bases.knowledge_base import KnowledgeBase
 from ibm_watsonx_orchestrate.client.knowledge_bases.knowledge_base_client import KnowledgeBaseClient
@@ -15,6 +18,7 @@ from ibm_watsonx_orchestrate.client.connections import get_connections_client
 from ibm_watsonx_orchestrate.client.utils import instantiate_client
 from ibm_watsonx_orchestrate.agent_builder.knowledge_bases.types import FileUpload, KnowledgeBaseListEntry
 from ibm_watsonx_orchestrate.cli.common import ListFormats, rich_table_to_markdown
+from ibm_watsonx_orchestrate.agent_builder.knowledge_bases.types import KnowledgeBaseKind, IndexConnection, SpecVersion
 
 logger = logging.getLogger(__name__)
 
@@ -64,6 +68,32 @@ def build_file_object(file_dir: str, file: str | FileUpload):
         return ('files', (get_file_name(file.path), open(get_relative_file_path(file.path, file_dir), 'rb')))
     return ('files', (get_file_name(file), open(get_relative_file_path(file, file_dir), 'rb')))
 
+def build_connections_map(key_attr: str) -> dict:
+    connections_client = get_connections_client()
+    connections = connections_client.list()
+
+    return {getattr(conn, key_attr): conn for conn in connections}
+
+def get_index_config(kb: KnowledgeBase, index: int = 0) -> IndexConnection | None:
+    if kb.conversational_search_tool is not None \
+        and kb.conversational_search_tool.index_config is not None \
+        and len(kb.conversational_search_tool.index_config) > index:
+
+        return kb.conversational_search_tool.index_config[index]
+    return None
+
+def get_kb_app_id(kb: KnowledgeBase) -> str | None:
+    index_config = get_index_config(kb)
+    if not index_config:
+        return
+    return index_config.app_id
+
+def get_kb_connection_id(kb: KnowledgeBase) -> str | None:
+    index_config = get_index_config(kb)
+    if not index_config:
+        return
+    return index_config.connection_id
+
 class KnowledgeBaseController:
     def __init__(self):
         self.client = None
@@ -79,24 +109,23 @@ class KnowledgeBaseController:
 
         knowledge_bases = parse_file(file=file)
 
-
-        connections_client = get_connections_client()
-        connection_id = None
-
-        connections = connections_client.get_draft_by_app_id(app_id=app_id)
-        if not connections:
-            logger.error(f"No connection exists with the app-id '{app_id}'")
-            exit(1)
-
-        connection_id = connections.connection_id
-
-        for kb in knowledge_bases:
-            if kb.conversational_search_tool and kb.conversational_search_tool.index_config and len(kb.conversational_search_tool.index_config) > 0:
-                kb.conversational_search_tool.index_config[0].connection_id = connection_id
+        connections_map = None
 
         existing_knowledge_bases = client.get_by_names([kb.name for kb in knowledge_bases])
 
         for kb in knowledge_bases:
+            app_id = app_id if app_id else get_kb_app_id(kb)
+            if app_id:
+                if not connections_map:
+                    connections_map = build_connections_map("app_id")
+                conn = connections_map.get(app_id)
+                if conn:
+                    index_config = get_index_config(kb)
+                    if index_config:
+                        index_config.connection_id = conn.connection_id
+                else:
+                    logger.error(f"No connection exists with the app-id '{app_id}'")
+                    exit(1)
             try:
                 file_dir = "/".join(file.split("/")[:-1])
 
@@ -265,19 +294,13 @@ class KnowledgeBaseController:
         for column in column_args:
             table.add_column(column, **column_args[column])
 
-
-        connections = connections_client.list()
-
-        connections_dict = {conn.connection_id: conn for conn in connections}
+        connections_dict = build_connections_map("connection_id")
 
         for kb in knowledge_bases:
             app_id = ""
-
-            if 
-
-                and len(kb.conversational_search_tool.index_config) > 0 \
-                and kb.conversational_search_tool.index_config[0].connection_id is not None:
-                conn = connections_dict.get(kb.conversational_search_tool.index_config[0].connection_id)
+            connection_id = get_kb_connection_id(kb)
+            if connection_id is not None:
+                conn = connections_dict.get(connection_id)
             if conn:
                 app_id = conn.app_id
 
@@ -312,4 +335,68 @@ class KnowledgeBaseController:
                 logger.warning(f"No knowledge base {logEnding} found")
             logger.error(e.response.text)
             exit(1)
+
+    def get_knowledge_base(self, id) -> KnowledgeBase:
+        client = self.get_client()
+        try:
+            return KnowledgeBase.model_validate(client.get_by_id(id))
+        except requests.HTTPError as e:
+            if e.response.status_code == 404:
+                logger.error(f"No knowledge base {id} found")
+            else:
+                logger.error(e.response.text)
+            exit(1)
+
 
+    def knowledge_base_export(self,
+                              output_path: str,
+                              id: Optional[str] = None,
+                              name: Optional[str] = None,
+                              zip_file_out: Optional[ZipFile] = None) -> None:
+        output_file = Path(output_path)
+        output_file_extension = output_file.suffix
+        if output_file_extension not in {".yaml", ".yml"} :
+            logger.error(f"Output file must end with the extension '.yaml'/'.yml'. Provided file '{output_path}' ends with '{output_file_extension}'")
+            sys.exit(1)
+
+        knowledge_base_id = self.get_id(id, name)
+        logEnding = f"with ID '{id}'" if id else f"'{name}'"
+
+        logger.info(f"Exporting spec for knowledge base {logEnding}'")
+
+        knowledge_base = self.get_knowledge_base(knowledge_base_id)
+
+        if not knowledge_base:
+            logger.error(f"Knowledge base'{knowledge_base_id}' not found.'")
+            return
+
+        knowledge_base.tenant_id = None
+        knowledge_base.id = None
+        knowledge_base.spec_version = SpecVersion.V1
+        knowledge_base.kind = KnowledgeBaseKind.KNOWLEDGE_BASE
+
+        connection_id = get_kb_connection_id(knowledge_base)
+        if connection_id:
+            connections_map = build_connections_map("connection_id")
+            conn = connections_map.get(connection_id)
+            if conn:
+                index_config = get_index_config(knowledge_base)
+                index_config.app_id = conn.app_id
+                index_config.connection_id = None
+            else:
+                logger.warning(f"Connection '{connection_id}' not found, unable to resolve app_id for Knowledge base {logEnding}")
+
+        knowledge_base_spec = knowledge_base.model_dump(mode="json", exclude_none=True, exclude_unset=True)
+        if zip_file_out:
+            knowledge_base_spec_yaml = yaml.dump(knowledge_base_spec, sort_keys=False, default_flow_style=False, allow_unicode=True)
+            knowledge_base_spec_yaml_bytes = knowledge_base_spec_yaml.encode("utf-8")
+            knowledge_base_spec_yaml_file = BytesIO(knowledge_base_spec_yaml_bytes)
+            zip_file_out.writestr(
+                output_path,
+                knowledge_base_spec_yaml_file.getvalue()
+            )
+        else:
+            with open(output_path, 'w') as outfile:
+                yaml.dump(knowledge_base_spec, outfile, sort_keys=False, default_flow_style=False, allow_unicode=True)
+
+        logger.info(f"Successfully exported for knowledge base {logEnding} to '{output_path}'")
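The new helpers make app IDs the portable identifier: on import, an `app_id` carried in the spec is resolved to a `connection_id`; on export, the `connection_id` is replaced by the connection's `app_id` so the YAML stays environment-independent. A condensed sketch of the lookup pattern, using stub connection records in place of the connections client:

```python
from types import SimpleNamespace

# Stand-ins for the records returned by the connections client; only the
# two attributes used by the new helpers are modeled here.
connections = [SimpleNamespace(app_id="kb-elastic", connection_id="conn-123")]

def build_connections_map(key_attr):
    # Same shape as the helper in the diff, but over the stub list above.
    return {getattr(conn, key_attr): conn for conn in connections}

# Import direction: the spec carries app_id, the runtime needs connection_id.
conn = build_connections_map("app_id").get("kb-elastic")
print(conn.connection_id)   # conn-123

# Export direction: the runtime record carries connection_id, the spec gets app_id.
conn = build_connections_map("connection_id").get("conn-123")
print(conn.app_id)          # kb-elastic
```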

ibm_watsonx_orchestrate/cli/commands/server/server_command.py

@@ -552,7 +552,6 @@ def run_db_migration() -> None:
     migration_command = f'''
     APPLIED_MIGRATIONS_FILE="/var/lib/postgresql/applied_migrations/applied_migrations.txt"
     touch "$APPLIED_MIGRATIONS_FILE"
-
     for file in /docker-entrypoint-initdb.d/*.sql; do
       filename=$(basename "$file")
 
@@ -568,6 +567,37 @@ def run_db_migration() -> None:
         fi
       fi
     done
+
+    # Create wxo_observability database if it doesn't exist
+    if psql -U {pg_user} -lqt | cut -d \\| -f 1 | grep -qw wxo_observability; then
+      echo 'Existing wxo_observability DB found'
+    else
+      echo 'Creating wxo_observability DB'
+      createdb -U "{pg_user}" -O "{pg_user}" wxo_observability;
+      psql -U {pg_user} -q -d postgres -c "GRANT CONNECT ON DATABASE wxo_observability TO {pg_user}";
+    fi
+
+    # Run observability-specific migrations
+    OBSERVABILITY_MIGRATIONS_FILE="/var/lib/postgresql/applied_migrations/observability_migrations.txt"
+    touch "$OBSERVABILITY_MIGRATIONS_FILE"
+
+    for file in /docker-entrypoint-initdb.d/observability/*.sql; do
+      if [ -f "$file" ]; then
+        filename=$(basename "$file")
+
+        if grep -Fxq "$filename" "$OBSERVABILITY_MIGRATIONS_FILE"; then
+          echo "Skipping already applied observability migration: $filename"
+        else
+          echo "Applying observability migration: $filename"
+          if psql -U {pg_user} -d wxo_observability -q -f "$file" > /dev/null 2>&1; then
+            echo "$filename" >> "$OBSERVABILITY_MIGRATIONS_FILE"
+          else
+            echo "Error applying observability migration: $filename. Stopping migrations."
+            exit 1
+          fi
+        fi
+      fi
+    done
     '''
 
     cli_config = Config()
|
@@ -131,7 +131,7 @@ class AgentClient(BaseAPIClient):
|
|
131
131
|
agent = transform_agents_to_flat_agent_spec(self._get(f"{self.base_endpoint}/{agent_id}"))
|
132
132
|
return agent
|
133
133
|
except ClientAPIException as e:
|
134
|
-
if e.response.status_code == 404 and "not found with the given name" in e.response.text:
|
134
|
+
if e.response.status_code == 404 and ("not found with the given name" in e.response.text or ("Agent" in e.response.text and "not found" in e.response.text)):
|
135
135
|
return ""
|
136
136
|
raise(e)
|
137
137
|
|
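The 404 handling in `AgentClient` now also treats generic "Agent ... not found" response bodies as a missing agent rather than an error. The predicate, isolated for illustration:

```python
def is_agent_not_found(status_code: int, body: str) -> bool:
    # Mirrors the condition added in this release: either the older
    # "not found with the given name" message or any body that mentions
    # both "Agent" and "not found" is treated as a missing agent.
    return status_code == 404 and (
        "not found with the given name" in body
        or ("Agent" in body and "not found" in body)
    )

print(is_agent_not_found(404, "Agent 'billing' not found"))           # True
print(is_agent_not_found(404, "Tool not found with the given name"))  # True
print(is_agent_not_found(404, "Route not found"))                     # False
```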

ibm_watsonx_orchestrate/client/copilot/cpe/copilot_cpe_client.py

@@ -13,22 +13,26 @@ class CPEClient(BaseAPIClient):
         self.chat_id = str(uuid4())
         super().__init__(*args, **kwargs)
         self.base_url = kwargs.get("base_url", self.base_url)
-        self.chat_model_name = 'llama-3-3-70b-instruct'
 
     def _get_headers(self) -> dict:
         return {
             "chat_id": self.chat_id
         }
 
+    def _get_chat_model_name_or_default(self, chat_nodel_name):
+        if chat_nodel_name:
+            return chat_nodel_name
+        return 'watsonx/meta-llama/llama-3-3-70b-instruct'
 
-    def submit_pre_cpe_chat(self,
+    def submit_pre_cpe_chat(self, chat_llm: str |None, user_message: str | None =None,
+                            tools: Dict[str, Any] = None, collaborators: Dict[str, Any] = None, knowledge_bases: Dict[str, Any] = None, selected:bool=False) -> dict:
         payload = {
             "message": user_message,
             "tools": tools,
             "collaborators": collaborators,
             "knowledge_bases": knowledge_bases,
             "chat_id": self.chat_id,
-            "chat_model_name": self.
+            "chat_model_name": self._get_chat_model_name_or_default(chat_llm),
             'selected':selected
         }
 
@@ -37,7 +41,8 @@ class CPEClient(BaseAPIClient):
         if response:
             return response[-1]
 
-    def refine_agent_with_chats(self, instruction: str,
+    def refine_agent_with_chats(self, instruction: str, chat_llm: str | None,
+                                tools: Dict[str, Any], collaborators: Dict[str, Any], knowledge_bases: Dict[str, Any], trajectories_with_feedback: List[List[dict]], model: str | None = None) -> dict:
         """
         Refines an agent's instruction using provided chat trajectories and optional model name.
         This method sends a payload containing the agent's current instruction and a list of chat trajectories
@@ -45,6 +50,7 @@ class CPEClient(BaseAPIClient):
         Optionally, a target model name can be specified to use in the refinement process.
         Parameters:
             instruction (str): The current instruction or prompt associated with the agent.
+            chat_llm(str): The name of the chat model
             tools (Dict[str, Any]) - a dictionary containing the selected tools
             collaborators (Dict[str, Any]) - a dictionary containing the selected collaborators
             knowledge_bases (Dict[str, Any]) - a dictionary containing the selected knowledge_bases
@@ -60,7 +66,8 @@ class CPEClient(BaseAPIClient):
             "instruction":instruction,
             "tools": tools,
             "collaborators": collaborators,
-            "knowledge_bases": knowledge_bases
+            "knowledge_bases": knowledge_bases,
+            "chat_model_name": self._get_chat_model_name_or_default(chat_llm),
         }
 
         if model:
@@ -71,14 +78,16 @@ class CPEClient(BaseAPIClient):
         if response:
             return response[-1]
 
-    def init_with_context(self,
+    def init_with_context(self, chat_llm: str | None,
+                          target_model_name: str | None = None, context_data: Dict[str, Any] = None) -> dict:
         payload = {
             "context_data": context_data,
-            "chat_id": self.chat_id
+            "chat_id": self.chat_id,
+            "chat_model_name": self._get_chat_model_name_or_default(chat_llm),
         }
 
-        if 
-            payload["target_model_name"] = 
+        if target_model_name:
+            payload["target_model_name"] = target_model_name
 
         response = self._post_nd_json("/wxo-cpe/init_cpe_from_wxo", data=payload)
 
@@ -86,15 +95,17 @@ class CPEClient(BaseAPIClient):
             return response[-1]
 
 
-    def invoke(self, prompt: str,
+    def invoke(self, prompt: str, chat_llm: str| None,
+               target_model_name: str | None = None, context_data: Dict[str, Any] = None) -> dict:
         payload = {
             "prompt": prompt,
             "context_data": context_data,
-            "chat_id": self.chat_id
+            "chat_id": self.chat_id,
+            "chat_model_name": self._get_chat_model_name_or_default(chat_llm),
         }
 
-        if 
-            payload["target_model_name"] = 
+        if target_model_name:
+            payload["target_model_name"] = target_model_name
 
         response = self._post_nd_json("/wxo-cpe/invoke", data=payload)
 
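Instead of a fixed `chat_model_name` attribute set in the constructor, the client now resolves the model per request and falls back to the watsonx-hosted llama-3-3-70b-instruct when no `chat_llm` is supplied. The fallback logic, isolated as a plain function:

```python
def get_chat_model_name_or_default(chat_model_name):
    # Same behavior as CPEClient._get_chat_model_name_or_default in the diff.
    if chat_model_name:
        return chat_model_name
    return 'watsonx/meta-llama/llama-3-3-70b-instruct'

print(get_chat_model_name_or_default(None))
# watsonx/meta-llama/llama-3-3-70b-instruct
print(get_chat_model_name_or_default("llama-3-3-70b-instruct"))
# llama-3-3-70b-instruct
```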

ibm_watsonx_orchestrate/client/tools/tempus_client.py

@@ -12,7 +12,7 @@ class TempusClient(BaseAPIClient):
     This may be temporary and may want to create a proxy API in wxo-server
     to redirect to the internal tempus runtime, and add a new operation in the ToolClient instead
     """
-    def __init__(self, base_url: str, api_key: str = None, is_local: bool = False, authenticator: MCSPAuthenticator = None):
+    def __init__(self, base_url: str, api_key: str = None, is_local: bool = False, authenticator: MCSPAuthenticator = None, *args, **kwargs):
         parsed_url = urlparse(base_url)
 
         # Reconstruct netloc with new port - use default above - eventually we need to open up a way through the wxo-server API
@@ -26,7 +26,9 @@ class TempusClient(BaseAPIClient):
             base_url=new_url,
             api_key=api_key,
             is_local=is_local,
-            authenticator=authenticator
+            authenticator=authenticator,
+            *args,
+            **kwargs
         )
 
     def get_tempus_endpoint(self) -> str:

ibm_watsonx_orchestrate/docker/default.env

@@ -73,13 +73,12 @@ AGENT_GATEWAY_REGISTRY=
 DB_REGISTRY=
 # If you build multiarch set all three of these to the same, we have a pr against main
 # to not have this separation, but we can merge it later
-DBTAG=
-AMDDBTAG=
-ARM64DBTAG=
+DBTAG=06-10-2025-87419d6
+AMDDBTAG=06-10-2025-87419d6
+ARM64DBTAG=06-10-2025-87419d6
 
 UI_REGISTRY=
-UITAG=
-
+UITAG=10-10-2025-f91ab60
 CM_REGISTRY=
 CM_TAG=16-09-2025-e33b344
 
@@ -92,10 +91,10 @@ TRM_REGISTRY=
 TR_TAG=24-09-2025-a515038
 TR_REGISTRY=
 
-BUILDER_TAG=
+BUILDER_TAG=06-10-2025-6387930
 BUILDER_REGISTRY=
 
-FLOW_RUNTIME_TAG=
+FLOW_RUNTIME_TAG=06-10-2025-1bc69ec
 FLOW_RUMTIME_REGISTRY=
 
 
@@ -108,7 +107,7 @@ JAEGER_PROXY_REGISTRY=
 SOCKET_HANDLER_TAG=29-05-2025
 SOCKET_HANDLER_REGISTRY=
 
-CPE_TAG=
+CPE_TAG=06-10-2025-74e5ca0
 CPE_REGISTRY=
 
 VOICE_CONTROLLER_TAG=12-09-2025-0e04772
@@ -124,7 +123,7 @@ WDU_REGISTRY=
 DOCPROC_DPS_TAG=20250910-165658-290-c566031
 DOCPROC_LLMSERVICE_TAG=20250915-main-139-7a36ad3
 DOCPROC_CACHE_TAG=20250916-master-86-454157f
-DOCPROC_DPI_TAG=
+DOCPROC_DPI_TAG=20250926-122324-302-cb9b3a46
 DOCPROC_REGISTRY=
 
 ETCD_TAG=

ibm_watsonx_orchestrate/flow_builder/flows/flow.py

@@ -511,7 +511,8 @@ class Flow(Node):
                        classes: type[BaseModel]| None = None,
                        description: str | None = None,
                        min_confidence: float = 0.0,
-                       input_map: DataMap = None
+                       input_map: DataMap = None,
+                       enable_review: bool = False) -> DocClassifierNode:
 
         if name is None :
             raise ValueError("name must be provided.")
@@ -532,7 +533,8 @@ class Flow(Node):
             output_schema=_get_tool_response_body(output_schema_obj),
             output_schema_object = output_schema_obj,
             config=doc_classifier_config,
-            version=version
+            version=version,
+            enable_review=enable_review
         )
         node = DocClassifierNode(spec=task_spec)
 
@@ -584,7 +586,8 @@ class Flow(Node):
                 input_map: DataMap = None,
                 enable_hw: bool = False,
                 min_confidence: float = 0, # Setting a small value because htil is not supported for pro code.
-                review_fields: List[str] = []
+                review_fields: List[str] = [],
+                enable_review: bool = False) -> tuple[DocExtNode, type[BaseModel]]:
 
         if name is None :
             raise ValueError("name must be provided.")
@@ -611,7 +614,8 @@ class Flow(Node):
             version=version,
             enable_hw=enable_hw,
             min_confidence=min_confidence,
-            review_fields=review_fields
+            review_fields=review_fields,
+            enable_review=enable_review
         )
         node = DocExtNode(spec=task_spec)
 

ibm_watsonx_orchestrate/flow_builder/types.py

@@ -256,6 +256,7 @@ class DocProcCommonNodeSpec(NodeSpec):
 class DocClassifierSpec(DocProcCommonNodeSpec):
     version : str = Field(description="A version of the spec")
     config : DocClassifierConfig
+    enable_review: bool = Field(description="Indicate if enable human in the loop review", default=False)
 
     def __init__(self, **data):
         super().__init__(**data)
@@ -266,6 +267,7 @@ class DocClassifierSpec(DocProcCommonNodeSpec):
         model_spec["version"] = self.version
         model_spec["config"] = self.config.model_dump()
         model_spec["task"] = DocProcTask.custom_document_classification
+        model_spec["enable_review"] = self.enable_review
         return model_spec
 
 class DocExtSpec(DocProcCommonNodeSpec):
@@ -273,6 +275,7 @@ class DocExtSpec(DocProcCommonNodeSpec):
     config : DocExtConfig
     min_confidence: float = Field(description="The minimal confidence acceptable for an extracted field value", default=0.0,le=1.0, ge=0.0 ,title="Minimum Confidence")
     review_fields: List[str] = Field(description="The fields that require user to review", default=[])
+    enable_review: bool = Field(description="Enable human in the loop review", default=False)
 
     def __init__(self, **data):
         super().__init__(**data)
@@ -285,6 +288,7 @@ class DocExtSpec(DocProcCommonNodeSpec):
         model_spec["task"] = DocProcTask.custom_field_extraction
         model_spec["min_confidence"] = self.min_confidence
         model_spec["review_fields"] = self.review_fields
+        model_spec["enable_review"] = self.enable_review
         return model_spec
 
 class DocProcField(BaseModel):
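Both document-processing node specs gain an `enable_review` switch that is carried into the serialized `model_spec`, and the flow builder methods above expose it as a keyword argument. A reduced sketch of the pattern (only the new field is modeled; `to_model_spec` is a hypothetical stand-in for the real serialization method):

```python
from pydantic import BaseModel, Field

class ReducedDocExtSpec(BaseModel):
    # Illustrative subset of DocExtSpec: just the new human-in-the-loop switch.
    enable_review: bool = Field(description="Enable human in the loop review", default=False)

    def to_model_spec(self) -> dict:
        model_spec = {}
        # As in the diff, the flag is copied into the serialized spec.
        model_spec["enable_review"] = self.enable_review
        return model_spec

print(ReducedDocExtSpec().to_model_spec())                    # {'enable_review': False}
print(ReducedDocExtSpec(enable_review=True).to_model_spec())  # {'enable_review': True}
```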

ibm_watsonx_orchestrate/flow_builder/utils.py

@@ -170,19 +170,11 @@ async def import_flow_model(model):
 
     return tool_id
 
-def import_flow_support_tools(model):
-
-    if not is_local_dev():
-        # we can't import support tools into non-local environments yet
-        return []
-
-
+def import_flow_support_tools(model):
     schedulable = False
     if "schedulable" in model["spec"]:
         schedulable = model["spec"]["schedulable"]
 
-    client = instantiate_client(TempusClient)
-
     logger.info(f"Import 'get_flow_status' tool spec...")
     tools = [create_flow_status_tool("i__get_flow_status_intrinsic_tool__")]
 

{ibm_watsonx_orchestrate-1.13.0b0.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/METADATA
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ibm-watsonx-orchestrate
-Version: 1.13.0b0
+Version: 1.13.0b1
 Summary: IBM watsonx.orchestrate SDK
 Author-email: IBM <support@ibm.com>
 License: MIT License
@@ -11,7 +11,7 @@ Requires-Dist: click<8.2.0,>=8.0.0
 Requires-Dist: docstring-parser<1.0,>=0.16
 Requires-Dist: httpx<1.0.0,>=0.28.1
 Requires-Dist: ibm-cloud-sdk-core>=3.24.2
-Requires-Dist: ibm-watsonx-orchestrate-evaluation-framework==1.1.
+Requires-Dist: ibm-watsonx-orchestrate-evaluation-framework==1.1.5
 Requires-Dist: jsonref==1.1.0
 Requires-Dist: langchain-core<=0.3.63
 Requires-Dist: langsmith<=0.3.45

{ibm_watsonx_orchestrate-1.13.0b0.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/RECORD
RENAMED

@@ -1,4 +1,4 @@
-ibm_watsonx_orchestrate/__init__.py,sha256=
+ibm_watsonx_orchestrate/__init__.py,sha256=xSIpIH-mBOz5c3mBLyzucN8QwXNXIeAQprgdXOks4Xk,428
 ibm_watsonx_orchestrate/agent_builder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ibm_watsonx_orchestrate/agent_builder/agents/__init__.py,sha256=lmZwaiWXD4Ea19nrMwZXaqCxFMG29xNS8vUoZtK3yI4,392
 ibm_watsonx_orchestrate/agent_builder/agents/agent.py,sha256=W0uya81fQPrYZFaO_tlsxBL56Bfpw0xrqdxQJhAZ6XI,983
@@ -13,7 +13,7 @@ ibm_watsonx_orchestrate/agent_builder/connections/connections.py,sha256=fBFNlied
 ibm_watsonx_orchestrate/agent_builder/connections/types.py,sha256=m-XhqKpZQyBieqiJ1JyVYVhpF6kgMt0dgou4ZcP6rcQ,13687
 ibm_watsonx_orchestrate/agent_builder/knowledge_bases/knowledge_base.py,sha256=_KuGF0RZpKpwdt31rzjlTjrhGRFz2RtLzleNkhMNX4k,1831
 ibm_watsonx_orchestrate/agent_builder/knowledge_bases/knowledge_base_requests.py,sha256=3xTfFMZR17EN8eYRhsVyBfOEzlTqyi0eYaMXyv0_ZtQ,862
-ibm_watsonx_orchestrate/agent_builder/knowledge_bases/types.py,sha256=
+ibm_watsonx_orchestrate/agent_builder/knowledge_bases/types.py,sha256=pXYe7JSxq2QRn4aVa3sopYyRi7nn9eCjYqdnynhek90,9579
 ibm_watsonx_orchestrate/agent_builder/model_policies/__init__.py,sha256=alJEjlneWlGpadmvOVlDjq5wulytKOmpkq3849fhKNc,131
 ibm_watsonx_orchestrate/agent_builder/model_policies/types.py,sha256=a6f9HP2OlZIe36k_PDRmFtefz2Ms2KBpzJ_jz8ggYbk,882
 ibm_watsonx_orchestrate/agent_builder/models/__init__.py,sha256=R5nTbyMBzahONdp5-bJFp-rbtTDnp2184k6doZqt67w,31
@@ -37,7 +37,7 @@ ibm_watsonx_orchestrate/cli/init_helper.py,sha256=qxnKdFcPtGsV_6RqP_IuLshRxgB004
 ibm_watsonx_orchestrate/cli/main.py,sha256=5AuoVVDHzgNZ6Y2ZR4bSF1cs7AhQRyd8n7S41o8lK4w,3618
 ibm_watsonx_orchestrate/cli/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ibm_watsonx_orchestrate/cli/commands/agents/agents_command.py,sha256=mWVojmtxslXL-eGMs7NUNBV_DudmQKeNnTaE9V9jjfU,9832
-ibm_watsonx_orchestrate/cli/commands/agents/agents_controller.py,sha256=
+ibm_watsonx_orchestrate/cli/commands/agents/agents_controller.py,sha256=w6y9j5E7fAWGO3ssun9QWUo4I2lpkIwOow4D5YTvThM,66108
 ibm_watsonx_orchestrate/cli/commands/channels/channels_command.py,sha256=fVIFhPUTPdxsxIE10nWL-W5wvBR-BS8V8D6r__J8R98,822
 ibm_watsonx_orchestrate/cli/commands/channels/channels_controller.py,sha256=WjQxwJujvo28SsWgfJSXIpkcgniKcskJ2arL4MOz0Ys,455
 ibm_watsonx_orchestrate/cli/commands/channels/types.py,sha256=hMFvWPr7tAmDrhBqtzfkCsrubX3lsU6lapTSOFsUbHM,475
@@ -46,16 +46,17 @@ ibm_watsonx_orchestrate/cli/commands/channels/webchat/channels_webchat_controlle
 ibm_watsonx_orchestrate/cli/commands/chat/chat_command.py,sha256=Q9vg2Z5Fsunu6GQFY_TIsNRhUCa0SSGSPnK4jxSGK34,1581
 ibm_watsonx_orchestrate/cli/commands/connections/connections_command.py,sha256=1eHwXI_aqS61sKvC5H2DOrcwGbs8a59pLE71bxdQgAw,12981
 ibm_watsonx_orchestrate/cli/commands/connections/connections_controller.py,sha256=6USaydthtH8SeTmMjJw6UkIdCo_xtzECRMDBx6_uk3Y,32164
-ibm_watsonx_orchestrate/cli/commands/copilot/copilot_command.py,sha256=
-ibm_watsonx_orchestrate/cli/commands/copilot/copilot_controller.py,sha256=
+ibm_watsonx_orchestrate/cli/commands/copilot/copilot_command.py,sha256=EJjVfYG8rW4OX-3wX_NiAvljJMhbJMZ8yb8oo3bsEDM,3867
+ibm_watsonx_orchestrate/cli/commands/copilot/copilot_controller.py,sha256=MvlOSOcBNGpGj9maBH_AkhHJI6okqz2W3ikrdVbTqPM,31086
 ibm_watsonx_orchestrate/cli/commands/copilot/copilot_server_controller.py,sha256=eh21hc_eSrSBNs9Tyk921wukzsIICZZY6T7t3qmjQJ0,3865
 ibm_watsonx_orchestrate/cli/commands/environment/environment_command.py,sha256=1CO0UfxSem0NafNGNCBXswC-P93MVYIA-YJewjKgdNc,4186
 ibm_watsonx_orchestrate/cli/commands/environment/environment_controller.py,sha256=z3m4khZfHNpv1dRsDfRW0cteLvhAeDEbxh-fOZduSpQ,10479
 ibm_watsonx_orchestrate/cli/commands/environment/types.py,sha256=X6jEnyBdxakromA7FhQ5btZMj9kwGcwRSFz8vpD65jA,224
-ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_command.py,sha256=
+ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_command.py,sha256=fmXKlwrS6Mbr4FeImdHEeX0k3Yja3T9pbJgayLw_yd8,23063
 ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_controller.py,sha256=avA5Guyyhqw9x0IAp-I9vVyEaS3X5U77Ag4xBw1k8l0,11348
-ibm_watsonx_orchestrate/cli/commands/
-ibm_watsonx_orchestrate/cli/commands/knowledge_bases/
+ibm_watsonx_orchestrate/cli/commands/evaluations/evaluations_environment_manager.py,sha256=Js_UR3aDi3fE6KU7lQ-7RlN_dWZQrynKwrn3ePUVo3c,5519
+ibm_watsonx_orchestrate/cli/commands/knowledge_bases/knowledge_bases_command.py,sha256=ddVq0nd4GUqPgZ51JK0nv1NvWg1UnsX75fAzYJqIHHw,3435
+ibm_watsonx_orchestrate/cli/commands/knowledge_bases/knowledge_bases_controller.py,sha256=vDiTvWzEJaFrsyFvCpeH7Lp37Vjnsq2BL9obyM69YHU,16459
 ibm_watsonx_orchestrate/cli/commands/login/login_command.py,sha256=xArMiojoozg7Exn6HTpbTcjDO2idZRA-y0WV-_Ic1Sk,651
 ibm_watsonx_orchestrate/cli/commands/models/model_provider_mapper.py,sha256=ldAMx0Vz5cf_ngADzdMrku7twmVmIT4EQ43YrPzgSKk,8855
 ibm_watsonx_orchestrate/cli/commands/models/models_command.py,sha256=dHuk0F01MEYRrnbdyGeDHYZ3A-QMnz8d3kcrbphWhEg,6448
@@ -65,7 +66,7 @@ ibm_watsonx_orchestrate/cli/commands/partners/partners_controller.py,sha256=47DE
 ibm_watsonx_orchestrate/cli/commands/partners/offering/partners_offering_command.py,sha256=X6u5zGwKYY1Uc2szaVHCIyMlFJBzp8o8JgVZUxcZPd8,1727
 ibm_watsonx_orchestrate/cli/commands/partners/offering/partners_offering_controller.py,sha256=4qIaMwUHcSsPDDqXHS0vuwktyFD18sQyFabbBhr8vjY,19229
 ibm_watsonx_orchestrate/cli/commands/partners/offering/types.py,sha256=Wc7YyY3dQobx5P5-as45WmTiZiuiSzvSSSDZP-5vj-g,2804
-ibm_watsonx_orchestrate/cli/commands/server/server_command.py,sha256=
+ibm_watsonx_orchestrate/cli/commands/server/server_command.py,sha256=Y80Vwal1QZbrvNDarczJ8FDb7szA1bJszEeV-T2R3QM,30634
 ibm_watsonx_orchestrate/cli/commands/server/types.py,sha256=DGLopPbLFf5yH5-hzsFf5Uaw158QHwkTAcwydbUmZ3Q,4416
 ibm_watsonx_orchestrate/cli/commands/settings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ibm_watsonx_orchestrate/cli/commands/settings/settings_command.py,sha256=CzXRkd-97jXyS6LtaaNtMah-aZu0919dYl-mDwzGThc,344
@@ -89,7 +90,7 @@ ibm_watsonx_orchestrate/client/credentials.py,sha256=gDVeeQZDdbbjJiO1EI61yx2oRgT
 ibm_watsonx_orchestrate/client/local_service_instance.py,sha256=dt7vfLnjgt7mT8wSq8SJZndNTwsPzhb0XDhcnPUPFpU,3524
 ibm_watsonx_orchestrate/client/service_instance.py,sha256=20yPs5bfAGN7TKUwMHZgsV2p0vzHr57pZD_rjc-5X80,5861
 ibm_watsonx_orchestrate/client/utils.py,sha256=pmk44dk3wLOKzfSRIVFWa2oAy2KJ4l0-3PRYvVeLj4s,7491
-ibm_watsonx_orchestrate/client/agents/agent_client.py,sha256=
+ibm_watsonx_orchestrate/client/agents/agent_client.py,sha256=4VIT9399HdA9NnC4JRur-_V7aLxBKAOhNQNr7ApqTzQ,6959
 ibm_watsonx_orchestrate/client/agents/assistant_agent_client.py,sha256=1JQN0E4T_uz5V0LM-LD1ahNu2KCeFBjXAr8WCiP9mkE,1745
 ibm_watsonx_orchestrate/client/agents/external_agent_client.py,sha256=iQ44XBdC4rYfS-zFn4St1xC5y5gf5SNqKHzMNQcFDZc,1808
 ibm_watsonx_orchestrate/client/analytics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -98,7 +99,7 @@ ibm_watsonx_orchestrate/client/analytics/llm/analytics_llm_client.py,sha256=0YS_
 ibm_watsonx_orchestrate/client/connections/__init__.py,sha256=J7TOyVg38h71AlaJjlFs5fOuAXTceHvELtOJ9oz4Mvg,207
 ibm_watsonx_orchestrate/client/connections/connections_client.py,sha256=JUUosNqEq-BasULrY1pkOTa9I2Z95XATN-sEZyjKeXc,8425
 ibm_watsonx_orchestrate/client/connections/utils.py,sha256=f6HsiDI6cycOqfYN6P4uZ3SQds83xlh83zTUioZPeYk,2618
-ibm_watsonx_orchestrate/client/copilot/cpe/copilot_cpe_client.py,sha256=
+ibm_watsonx_orchestrate/client/copilot/cpe/copilot_cpe_client.py,sha256=7Tk6BnLq5_UH6pct7t37joXp7clTy7mkq6SMcadR2JM,4668
 ibm_watsonx_orchestrate/client/knowledge_bases/knowledge_base_client.py,sha256=yWddZ4W_l-USoAPqPrj7B2qYfKzDT1l0q509dwbBULM,2254
 ibm_watsonx_orchestrate/client/model_policies/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ibm_watsonx_orchestrate/client/model_policies/model_policies_client.py,sha256=YJ9OwkO2N78zz12r355ItTBSXp16EkcQZHZWPJLFPuE,2264
@@ -106,11 +107,11 @@ ibm_watsonx_orchestrate/client/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCe
 ibm_watsonx_orchestrate/client/models/models_client.py,sha256=PC94TlzWlN1kT1oZTZJYhR85nDv_jNq5O0w091XcIvc,2170
 ibm_watsonx_orchestrate/client/threads/threads_client.py,sha256=ZptejEUsfvQFRMk8NAV7rzpd5mjawKOLm5Lsj8L_mag,1120
 ibm_watsonx_orchestrate/client/toolkit/toolkit_client.py,sha256=TLFNS39EeBD_t4Y-rX9sGp4sWBDr--mE5qVtHq8Q2hk,3121
-ibm_watsonx_orchestrate/client/tools/tempus_client.py,sha256=
+ibm_watsonx_orchestrate/client/tools/tempus_client.py,sha256=hu9LzkRRucriEyl-KU9aGVRoNjgJo69U0-Rew94CvKk,2012
 ibm_watsonx_orchestrate/client/tools/tool_client.py,sha256=kYwQp-ym9dYQDOFSTnXNyeh8wzl39LpBJqHSNT9EKT0,2113
 ibm_watsonx_orchestrate/client/voice_configurations/voice_configurations_client.py,sha256=M5xIPLiVNpP-zxQw8CTNT9AiBjeXXmJiNaE142e2A3E,2682
-ibm_watsonx_orchestrate/docker/compose-lite.yml,sha256=
-ibm_watsonx_orchestrate/docker/default.env,sha256=
+ibm_watsonx_orchestrate/docker/compose-lite.yml,sha256=UuIeyd27_4D3eTrxvVLdA-MTbPbunhikebkV5WLG4f0,48155
+ibm_watsonx_orchestrate/docker/default.env,sha256=lqFK6j6QP2S5y1rdrCNQc6TeSKVAJN1HADI_6DO3riA,6443
 ibm_watsonx_orchestrate/docker/proxy-config-single.yaml,sha256=WEbK4ENFuTCYhzRu_QblWp1_GMARgZnx5vReQafkIG8,308
 ibm_watsonx_orchestrate/docker/start-up.sh,sha256=LTtwHp0AidVgjohis2LXGvZnkFQStOiUAxgGABOyeUI,1811
 ibm_watsonx_orchestrate/docker/sdk/ibm_watsonx_orchestrate-0.6.0-py3-none-any.whl,sha256=Hi3-owh5OM0Jz2ihX9nLoojnr7Ky1TV-GelyqLcewLE,2047417
@@ -119,13 +120,13 @@ ibm_watsonx_orchestrate/docker/tempus/common-config.yaml,sha256=Zo3F36F5DV4VO_vU
 ibm_watsonx_orchestrate/flow_builder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ibm_watsonx_orchestrate/flow_builder/data_map.py,sha256=LinePFgb5mBnrvNmPkFe3rq5oYJZSjcgmaEGpE6dVwc,586
 ibm_watsonx_orchestrate/flow_builder/node.py,sha256=Y5hzVkbWNWaYp6zSbLW4Qbg4r1deLAs-X3HEFoZ9vzk,10338
-ibm_watsonx_orchestrate/flow_builder/types.py,sha256=
-ibm_watsonx_orchestrate/flow_builder/utils.py,sha256=
+ibm_watsonx_orchestrate/flow_builder/types.py,sha256=Y0LIEWbijKtSmppVFVycEs6cH8lqIW2XQSa-uQ469Zk,71840
+ibm_watsonx_orchestrate/flow_builder/utils.py,sha256=QnI5vUZiBHA4Px650zOrJNG9DtvXiCIQPh9lOetmQno,11761
 ibm_watsonx_orchestrate/flow_builder/flows/__init__.py,sha256=iRYV0_eXgBBGhuNnvg-mUyPUyCIw5BiallPOp27bzYM,1083
 ibm_watsonx_orchestrate/flow_builder/flows/constants.py,sha256=-TGneZyjA4YiAtJJK7OmmjDHYQC4mw2e98MPAZqiB50,324
 ibm_watsonx_orchestrate/flow_builder/flows/decorators.py,sha256=07yaaDXLrwmj0v9lhZli8UmnKVpIuF6x1t3JbPVj0F8,3247
 ibm_watsonx_orchestrate/flow_builder/flows/events.py,sha256=VyaBm0sADwr15LWfKbcBQS1M80NKqzYDj3UlW3OpOf4,2984
-ibm_watsonx_orchestrate/flow_builder/flows/flow.py,sha256=
+ibm_watsonx_orchestrate/flow_builder/flows/flow.py,sha256=pi35CAg4Bx2AasLjy3URfskOLUvjgTb7Bbi0fMskDsU,68900
 ibm_watsonx_orchestrate/langflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ibm_watsonx_orchestrate/langflow/langflow_utils.py,sha256=UIWN28WvaYNhV-Ph_x0HSqV7jbL8lQlaUBdYeELqXJo,5765
 ibm_watsonx_orchestrate/langflow/lfx_deps.py,sha256=Bgbo8IQEX_TblGLmw1tnA0DP7Xm4SFRncPp4TGbqY1o,1396
@@ -141,8 +142,8 @@ ibm_watsonx_orchestrate/utils/utils.py,sha256=3k4qXVrgaBsvVt-nnjKrBwioNE2b0aflrv
 ibm_watsonx_orchestrate/utils/logging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ibm_watsonx_orchestrate/utils/logging/logger.py,sha256=FzeGnidXAjC7yHrvIaj4KZPeaBBSCniZFlwgr5yV3oA,1037
 ibm_watsonx_orchestrate/utils/logging/logging.yaml,sha256=9_TKfuFr1barnOKP0fZT5D6MhddiwsXVTFjtRbcOO5w,314
-ibm_watsonx_orchestrate-1.13.
-ibm_watsonx_orchestrate-1.13.
-ibm_watsonx_orchestrate-1.13.
-ibm_watsonx_orchestrate-1.13.
-ibm_watsonx_orchestrate-1.13.
+ibm_watsonx_orchestrate-1.13.0b1.dist-info/METADATA,sha256=kVBl7sWJuOj8I6-A9sqotUggzOehRdJOBB3aCueFXlE,1363
+ibm_watsonx_orchestrate-1.13.0b1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ibm_watsonx_orchestrate-1.13.0b1.dist-info/entry_points.txt,sha256=SfIT02-Jen5e99OcLhzbcM9Bdyf8SGVOCtnSplgZdQI,69
+ibm_watsonx_orchestrate-1.13.0b1.dist-info/licenses/LICENSE,sha256=Shgxx7hTdCOkiVRmfGgp_1ISISrwQD7m2f0y8Hsapl4,1083
+ibm_watsonx_orchestrate-1.13.0b1.dist-info/RECORD,,

{ibm_watsonx_orchestrate-1.13.0b0.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/WHEEL
RENAMED
File without changes

{ibm_watsonx_orchestrate-1.13.0b0.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/entry_points.txt
RENAMED
File without changes

{ibm_watsonx_orchestrate-1.13.0b0.dist-info → ibm_watsonx_orchestrate-1.13.0b1.dist-info}/licenses/LICENSE
RENAMED
File without changes