aiqtoolkit 1.1.0a20250429__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aiqtoolkit might be problematic. Click here for more details.
- aiq/agent/__init__.py +0 -0
- aiq/agent/base.py +76 -0
- aiq/agent/dual_node.py +67 -0
- aiq/agent/react_agent/__init__.py +0 -0
- aiq/agent/react_agent/agent.py +322 -0
- aiq/agent/react_agent/output_parser.py +104 -0
- aiq/agent/react_agent/prompt.py +46 -0
- aiq/agent/react_agent/register.py +148 -0
- aiq/agent/reasoning_agent/__init__.py +0 -0
- aiq/agent/reasoning_agent/reasoning_agent.py +224 -0
- aiq/agent/register.py +23 -0
- aiq/agent/rewoo_agent/__init__.py +0 -0
- aiq/agent/rewoo_agent/agent.py +410 -0
- aiq/agent/rewoo_agent/prompt.py +108 -0
- aiq/agent/rewoo_agent/register.py +158 -0
- aiq/agent/tool_calling_agent/__init__.py +0 -0
- aiq/agent/tool_calling_agent/agent.py +123 -0
- aiq/agent/tool_calling_agent/register.py +105 -0
- aiq/builder/__init__.py +0 -0
- aiq/builder/builder.py +223 -0
- aiq/builder/component_utils.py +303 -0
- aiq/builder/context.py +198 -0
- aiq/builder/embedder.py +24 -0
- aiq/builder/eval_builder.py +116 -0
- aiq/builder/evaluator.py +29 -0
- aiq/builder/framework_enum.py +24 -0
- aiq/builder/front_end.py +73 -0
- aiq/builder/function.py +297 -0
- aiq/builder/function_base.py +372 -0
- aiq/builder/function_info.py +627 -0
- aiq/builder/intermediate_step_manager.py +125 -0
- aiq/builder/llm.py +25 -0
- aiq/builder/retriever.py +25 -0
- aiq/builder/user_interaction_manager.py +71 -0
- aiq/builder/workflow.py +134 -0
- aiq/builder/workflow_builder.py +733 -0
- aiq/cli/__init__.py +14 -0
- aiq/cli/cli_utils/__init__.py +0 -0
- aiq/cli/cli_utils/config_override.py +233 -0
- aiq/cli/cli_utils/validation.py +37 -0
- aiq/cli/commands/__init__.py +0 -0
- aiq/cli/commands/configure/__init__.py +0 -0
- aiq/cli/commands/configure/channel/__init__.py +0 -0
- aiq/cli/commands/configure/channel/add.py +28 -0
- aiq/cli/commands/configure/channel/channel.py +34 -0
- aiq/cli/commands/configure/channel/remove.py +30 -0
- aiq/cli/commands/configure/channel/update.py +30 -0
- aiq/cli/commands/configure/configure.py +33 -0
- aiq/cli/commands/evaluate.py +139 -0
- aiq/cli/commands/info/__init__.py +14 -0
- aiq/cli/commands/info/info.py +37 -0
- aiq/cli/commands/info/list_channels.py +32 -0
- aiq/cli/commands/info/list_components.py +129 -0
- aiq/cli/commands/registry/__init__.py +14 -0
- aiq/cli/commands/registry/publish.py +88 -0
- aiq/cli/commands/registry/pull.py +118 -0
- aiq/cli/commands/registry/registry.py +36 -0
- aiq/cli/commands/registry/remove.py +108 -0
- aiq/cli/commands/registry/search.py +155 -0
- aiq/cli/commands/start.py +250 -0
- aiq/cli/commands/uninstall.py +83 -0
- aiq/cli/commands/validate.py +47 -0
- aiq/cli/commands/workflow/__init__.py +14 -0
- aiq/cli/commands/workflow/templates/__init__.py.j2 +0 -0
- aiq/cli/commands/workflow/templates/config.yml.j2 +16 -0
- aiq/cli/commands/workflow/templates/pyproject.toml.j2 +22 -0
- aiq/cli/commands/workflow/templates/register.py.j2 +5 -0
- aiq/cli/commands/workflow/templates/workflow.py.j2 +36 -0
- aiq/cli/commands/workflow/workflow.py +37 -0
- aiq/cli/commands/workflow/workflow_commands.py +307 -0
- aiq/cli/entrypoint.py +133 -0
- aiq/cli/main.py +44 -0
- aiq/cli/register_workflow.py +408 -0
- aiq/cli/type_registry.py +869 -0
- aiq/data_models/__init__.py +14 -0
- aiq/data_models/api_server.py +550 -0
- aiq/data_models/common.py +143 -0
- aiq/data_models/component.py +46 -0
- aiq/data_models/component_ref.py +135 -0
- aiq/data_models/config.py +349 -0
- aiq/data_models/dataset_handler.py +122 -0
- aiq/data_models/discovery_metadata.py +269 -0
- aiq/data_models/embedder.py +26 -0
- aiq/data_models/evaluate.py +101 -0
- aiq/data_models/evaluator.py +26 -0
- aiq/data_models/front_end.py +26 -0
- aiq/data_models/function.py +30 -0
- aiq/data_models/function_dependencies.py +64 -0
- aiq/data_models/interactive.py +237 -0
- aiq/data_models/intermediate_step.py +269 -0
- aiq/data_models/invocation_node.py +38 -0
- aiq/data_models/llm.py +26 -0
- aiq/data_models/logging.py +26 -0
- aiq/data_models/memory.py +26 -0
- aiq/data_models/profiler.py +53 -0
- aiq/data_models/registry_handler.py +26 -0
- aiq/data_models/retriever.py +30 -0
- aiq/data_models/step_adaptor.py +64 -0
- aiq/data_models/streaming.py +33 -0
- aiq/data_models/swe_bench_model.py +54 -0
- aiq/data_models/telemetry_exporter.py +26 -0
- aiq/embedder/__init__.py +0 -0
- aiq/embedder/langchain_client.py +41 -0
- aiq/embedder/nim_embedder.py +58 -0
- aiq/embedder/openai_embedder.py +42 -0
- aiq/embedder/register.py +24 -0
- aiq/eval/__init__.py +14 -0
- aiq/eval/config.py +42 -0
- aiq/eval/dataset_handler/__init__.py +0 -0
- aiq/eval/dataset_handler/dataset_downloader.py +106 -0
- aiq/eval/dataset_handler/dataset_filter.py +52 -0
- aiq/eval/dataset_handler/dataset_handler.py +164 -0
- aiq/eval/evaluate.py +322 -0
- aiq/eval/evaluator/__init__.py +14 -0
- aiq/eval/evaluator/evaluator_model.py +44 -0
- aiq/eval/intermediate_step_adapter.py +93 -0
- aiq/eval/rag_evaluator/__init__.py +0 -0
- aiq/eval/rag_evaluator/evaluate.py +138 -0
- aiq/eval/rag_evaluator/register.py +138 -0
- aiq/eval/register.py +22 -0
- aiq/eval/remote_workflow.py +128 -0
- aiq/eval/runtime_event_subscriber.py +52 -0
- aiq/eval/swe_bench_evaluator/__init__.py +0 -0
- aiq/eval/swe_bench_evaluator/evaluate.py +215 -0
- aiq/eval/swe_bench_evaluator/register.py +36 -0
- aiq/eval/trajectory_evaluator/__init__.py +0 -0
- aiq/eval/trajectory_evaluator/evaluate.py +118 -0
- aiq/eval/trajectory_evaluator/register.py +40 -0
- aiq/eval/utils/__init__.py +0 -0
- aiq/eval/utils/output_uploader.py +131 -0
- aiq/eval/utils/tqdm_position_registry.py +40 -0
- aiq/front_ends/__init__.py +14 -0
- aiq/front_ends/console/__init__.py +14 -0
- aiq/front_ends/console/console_front_end_config.py +32 -0
- aiq/front_ends/console/console_front_end_plugin.py +107 -0
- aiq/front_ends/console/register.py +25 -0
- aiq/front_ends/cron/__init__.py +14 -0
- aiq/front_ends/fastapi/__init__.py +14 -0
- aiq/front_ends/fastapi/fastapi_front_end_config.py +150 -0
- aiq/front_ends/fastapi/fastapi_front_end_plugin.py +103 -0
- aiq/front_ends/fastapi/fastapi_front_end_plugin_worker.py +574 -0
- aiq/front_ends/fastapi/intermediate_steps_subscriber.py +80 -0
- aiq/front_ends/fastapi/job_store.py +161 -0
- aiq/front_ends/fastapi/main.py +70 -0
- aiq/front_ends/fastapi/message_handler.py +279 -0
- aiq/front_ends/fastapi/message_validator.py +345 -0
- aiq/front_ends/fastapi/register.py +25 -0
- aiq/front_ends/fastapi/response_helpers.py +181 -0
- aiq/front_ends/fastapi/step_adaptor.py +315 -0
- aiq/front_ends/fastapi/websocket.py +148 -0
- aiq/front_ends/mcp/__init__.py +14 -0
- aiq/front_ends/mcp/mcp_front_end_config.py +32 -0
- aiq/front_ends/mcp/mcp_front_end_plugin.py +93 -0
- aiq/front_ends/mcp/register.py +27 -0
- aiq/front_ends/mcp/tool_converter.py +242 -0
- aiq/front_ends/register.py +22 -0
- aiq/front_ends/simple_base/__init__.py +14 -0
- aiq/front_ends/simple_base/simple_front_end_plugin_base.py +52 -0
- aiq/llm/__init__.py +0 -0
- aiq/llm/nim_llm.py +45 -0
- aiq/llm/openai_llm.py +45 -0
- aiq/llm/register.py +22 -0
- aiq/llm/utils/__init__.py +14 -0
- aiq/llm/utils/env_config_value.py +94 -0
- aiq/llm/utils/error.py +17 -0
- aiq/memory/__init__.py +20 -0
- aiq/memory/interfaces.py +183 -0
- aiq/memory/models.py +102 -0
- aiq/meta/module_to_distro.json +3 -0
- aiq/meta/pypi.md +59 -0
- aiq/observability/__init__.py +0 -0
- aiq/observability/async_otel_listener.py +270 -0
- aiq/observability/register.py +97 -0
- aiq/plugins/.namespace +1 -0
- aiq/profiler/__init__.py +0 -0
- aiq/profiler/callbacks/__init__.py +0 -0
- aiq/profiler/callbacks/agno_callback_handler.py +295 -0
- aiq/profiler/callbacks/base_callback_class.py +20 -0
- aiq/profiler/callbacks/langchain_callback_handler.py +278 -0
- aiq/profiler/callbacks/llama_index_callback_handler.py +205 -0
- aiq/profiler/callbacks/semantic_kernel_callback_handler.py +238 -0
- aiq/profiler/callbacks/token_usage_base_model.py +27 -0
- aiq/profiler/data_frame_row.py +51 -0
- aiq/profiler/decorators/__init__.py +0 -0
- aiq/profiler/decorators/framework_wrapper.py +131 -0
- aiq/profiler/decorators/function_tracking.py +254 -0
- aiq/profiler/forecasting/__init__.py +0 -0
- aiq/profiler/forecasting/config.py +18 -0
- aiq/profiler/forecasting/model_trainer.py +75 -0
- aiq/profiler/forecasting/models/__init__.py +22 -0
- aiq/profiler/forecasting/models/forecasting_base_model.py +40 -0
- aiq/profiler/forecasting/models/linear_model.py +196 -0
- aiq/profiler/forecasting/models/random_forest_regressor.py +268 -0
- aiq/profiler/inference_metrics_model.py +25 -0
- aiq/profiler/inference_optimization/__init__.py +0 -0
- aiq/profiler/inference_optimization/bottleneck_analysis/__init__.py +0 -0
- aiq/profiler/inference_optimization/bottleneck_analysis/nested_stack_analysis.py +452 -0
- aiq/profiler/inference_optimization/bottleneck_analysis/simple_stack_analysis.py +258 -0
- aiq/profiler/inference_optimization/data_models.py +386 -0
- aiq/profiler/inference_optimization/experimental/__init__.py +0 -0
- aiq/profiler/inference_optimization/experimental/concurrency_spike_analysis.py +468 -0
- aiq/profiler/inference_optimization/experimental/prefix_span_analysis.py +405 -0
- aiq/profiler/inference_optimization/llm_metrics.py +212 -0
- aiq/profiler/inference_optimization/prompt_caching.py +163 -0
- aiq/profiler/inference_optimization/token_uniqueness.py +107 -0
- aiq/profiler/inference_optimization/workflow_runtimes.py +72 -0
- aiq/profiler/intermediate_property_adapter.py +102 -0
- aiq/profiler/profile_runner.py +433 -0
- aiq/profiler/utils.py +184 -0
- aiq/registry_handlers/__init__.py +0 -0
- aiq/registry_handlers/local/__init__.py +0 -0
- aiq/registry_handlers/local/local_handler.py +176 -0
- aiq/registry_handlers/local/register_local.py +37 -0
- aiq/registry_handlers/metadata_factory.py +60 -0
- aiq/registry_handlers/package_utils.py +198 -0
- aiq/registry_handlers/pypi/__init__.py +0 -0
- aiq/registry_handlers/pypi/pypi_handler.py +251 -0
- aiq/registry_handlers/pypi/register_pypi.py +40 -0
- aiq/registry_handlers/register.py +21 -0
- aiq/registry_handlers/registry_handler_base.py +157 -0
- aiq/registry_handlers/rest/__init__.py +0 -0
- aiq/registry_handlers/rest/register_rest.py +56 -0
- aiq/registry_handlers/rest/rest_handler.py +237 -0
- aiq/registry_handlers/schemas/__init__.py +0 -0
- aiq/registry_handlers/schemas/headers.py +42 -0
- aiq/registry_handlers/schemas/package.py +68 -0
- aiq/registry_handlers/schemas/publish.py +63 -0
- aiq/registry_handlers/schemas/pull.py +81 -0
- aiq/registry_handlers/schemas/remove.py +36 -0
- aiq/registry_handlers/schemas/search.py +91 -0
- aiq/registry_handlers/schemas/status.py +47 -0
- aiq/retriever/__init__.py +0 -0
- aiq/retriever/interface.py +37 -0
- aiq/retriever/milvus/__init__.py +14 -0
- aiq/retriever/milvus/register.py +81 -0
- aiq/retriever/milvus/retriever.py +228 -0
- aiq/retriever/models.py +74 -0
- aiq/retriever/nemo_retriever/__init__.py +14 -0
- aiq/retriever/nemo_retriever/register.py +60 -0
- aiq/retriever/nemo_retriever/retriever.py +190 -0
- aiq/retriever/register.py +22 -0
- aiq/runtime/__init__.py +14 -0
- aiq/runtime/loader.py +188 -0
- aiq/runtime/runner.py +176 -0
- aiq/runtime/session.py +116 -0
- aiq/settings/__init__.py +0 -0
- aiq/settings/global_settings.py +318 -0
- aiq/test/.namespace +1 -0
- aiq/tool/__init__.py +0 -0
- aiq/tool/code_execution/__init__.py +0 -0
- aiq/tool/code_execution/code_sandbox.py +188 -0
- aiq/tool/code_execution/local_sandbox/Dockerfile.sandbox +60 -0
- aiq/tool/code_execution/local_sandbox/__init__.py +13 -0
- aiq/tool/code_execution/local_sandbox/local_sandbox_server.py +79 -0
- aiq/tool/code_execution/local_sandbox/sandbox.requirements.txt +4 -0
- aiq/tool/code_execution/local_sandbox/start_local_sandbox.sh +25 -0
- aiq/tool/code_execution/register.py +70 -0
- aiq/tool/code_execution/utils.py +100 -0
- aiq/tool/datetime_tools.py +42 -0
- aiq/tool/document_search.py +141 -0
- aiq/tool/github_tools/__init__.py +0 -0
- aiq/tool/github_tools/create_github_commit.py +133 -0
- aiq/tool/github_tools/create_github_issue.py +87 -0
- aiq/tool/github_tools/create_github_pr.py +106 -0
- aiq/tool/github_tools/get_github_file.py +106 -0
- aiq/tool/github_tools/get_github_issue.py +166 -0
- aiq/tool/github_tools/get_github_pr.py +256 -0
- aiq/tool/github_tools/update_github_issue.py +100 -0
- aiq/tool/mcp/__init__.py +14 -0
- aiq/tool/mcp/mcp_client.py +220 -0
- aiq/tool/mcp/mcp_tool.py +75 -0
- aiq/tool/memory_tools/__init__.py +0 -0
- aiq/tool/memory_tools/add_memory_tool.py +67 -0
- aiq/tool/memory_tools/delete_memory_tool.py +67 -0
- aiq/tool/memory_tools/get_memory_tool.py +72 -0
- aiq/tool/nvidia_rag.py +95 -0
- aiq/tool/register.py +36 -0
- aiq/tool/retriever.py +89 -0
- aiq/utils/__init__.py +0 -0
- aiq/utils/data_models/__init__.py +0 -0
- aiq/utils/data_models/schema_validator.py +58 -0
- aiq/utils/debugging_utils.py +43 -0
- aiq/utils/exception_handlers/__init__.py +0 -0
- aiq/utils/exception_handlers/schemas.py +114 -0
- aiq/utils/io/__init__.py +0 -0
- aiq/utils/io/yaml_tools.py +50 -0
- aiq/utils/metadata_utils.py +74 -0
- aiq/utils/producer_consumer_queue.py +178 -0
- aiq/utils/reactive/__init__.py +0 -0
- aiq/utils/reactive/base/__init__.py +0 -0
- aiq/utils/reactive/base/observable_base.py +65 -0
- aiq/utils/reactive/base/observer_base.py +55 -0
- aiq/utils/reactive/base/subject_base.py +79 -0
- aiq/utils/reactive/observable.py +59 -0
- aiq/utils/reactive/observer.py +76 -0
- aiq/utils/reactive/subject.py +131 -0
- aiq/utils/reactive/subscription.py +49 -0
- aiq/utils/settings/__init__.py +0 -0
- aiq/utils/settings/global_settings.py +197 -0
- aiq/utils/type_converter.py +232 -0
- aiq/utils/type_utils.py +397 -0
- aiq/utils/url_utils.py +27 -0
- aiqtoolkit-1.1.0a20250429.dist-info/METADATA +326 -0
- aiqtoolkit-1.1.0a20250429.dist-info/RECORD +309 -0
- aiqtoolkit-1.1.0a20250429.dist-info/WHEEL +5 -0
- aiqtoolkit-1.1.0a20250429.dist-info/entry_points.txt +17 -0
- aiqtoolkit-1.1.0a20250429.dist-info/licenses/LICENSE-3rd-party.txt +3686 -0
- aiqtoolkit-1.1.0a20250429.dist-info/licenses/LICENSE.md +201 -0
- aiqtoolkit-1.1.0a20250429.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import json
|
|
17
|
+
|
|
18
|
+
import pandas as pd
|
|
19
|
+
|
|
20
|
+
from aiq.data_models.dataset_handler import EvalDatasetConfig
|
|
21
|
+
from aiq.data_models.dataset_handler import EvalDatasetJsonConfig
|
|
22
|
+
from aiq.data_models.intermediate_step import IntermediateStep
|
|
23
|
+
from aiq.eval.dataset_handler.dataset_downloader import DatasetDownloader
|
|
24
|
+
from aiq.eval.dataset_handler.dataset_filter import DatasetFilter
|
|
25
|
+
from aiq.eval.evaluator.evaluator_model import EvalInput
|
|
26
|
+
from aiq.eval.evaluator.evaluator_model import EvalInputItem
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class DatasetHandler:
    """
    Read the datasets and pre-process (apply filters, deduplicate etc.) before turning them into EvalInput objects.

    One DatasetHandler object is needed for each dataset to be evaluated.
    """

    def __init__(self, dataset_config: EvalDatasetConfig, reps: int):
        """Initialize the handler for one dataset.

        Args:
            dataset_config: Dataset configuration (file location, key names, structure, filters).
            reps: Number of repetitions; rows are replicated when reps > 1.
        """
        # Function-level import — presumably to avoid a circular import; confirm.
        from aiq.eval.intermediate_step_adapter import IntermediateStepAdapter

        self.dataset_config = dataset_config
        self.dataset_filter = DatasetFilter(dataset_config.filter)
        self.reps = reps
        # Helpers
        self.intermediate_step_adapter = IntermediateStepAdapter()

    def is_structured_input(self) -> bool:
        '''Check if the input is structured or unstructured'''
        # Structured mode is the default; it is turned off via structure.disable.
        return not self.dataset_config.structure.disable

    @property
    def id_key(self) -> str:
        # Column name holding the unique row id.
        return self.dataset_config.id_key

    @property
    def question_key(self) -> str:
        # Column name holding the question (structured datasets only).
        return self.dataset_config.structure.question_key

    @property
    def answer_key(self) -> str:
        # Column name holding the expected (reference) answer.
        return self.dataset_config.structure.answer_key

    @property
    def generated_answer_key(self) -> str:
        # Column name holding the workflow-generated answer.
        return self.dataset_config.structure.generated_answer_key

    @property
    def trajectory_key(self) -> str:
        # Column name holding the recorded intermediate-step trajectory.
        return self.dataset_config.structure.trajectory_key

    @property
    def expected_trajectory_key(self) -> str:
        # Column name holding the expected intermediate-step trajectory.
        return self.dataset_config.structure.expected_trajectory_key

    def get_eval_input_from_df(self, input_df: pd.DataFrame) -> EvalInput:
        """Convert a pre-processed DataFrame into an EvalInput.

        Structured datasets: rows with a missing/blank question are dropped and the
        configured columns are mapped onto EvalInputItem fields. Unstructured
        datasets: each whole row is serialized to JSON as the input object.
        """

        def create_eval_item(row: pd.Series, structured: bool) -> EvalInputItem:
            """Helper function to create EvalInputItem."""
            return EvalInputItem(
                id=row.get(self.id_key, ""),
                input_obj=row.to_json() if not structured else row.get(self.question_key, ""),
                expected_output_obj=row.get(self.answer_key, "") if structured else "",
                output_obj=row.get(self.generated_answer_key, "") if structured else "",
                trajectory=row.get(self.trajectory_key, []) if structured else [],
                expected_trajectory=row.get(self.expected_trajectory_key, []) if structured else [],
            )

        # if input dataframe is empty return an empty list
        if input_df.empty:
            return EvalInput(eval_input_items=[])

        structured = self.is_structured_input()
        if structured:
            # For structured input, question is mandatory. Ignore rows with missing or empty questions
            input_df = input_df[input_df[self.question_key].notnull() & input_df[self.question_key].str.strip().ne("")]
        eval_input_items = [create_eval_item(row, structured) for _, row in input_df.iterrows()]

        return EvalInput(eval_input_items=eval_input_items)

    def setup_reps(self, input_df: pd.DataFrame) -> pd.DataFrame:
        """replicate the rows and update the id to id_key + "_rep" + rep_number"""
        # Replicate the rows
        input_df = pd.concat([input_df] * self.reps, ignore_index=True)
        # Compute repetition index (0-based per original id via groupby().cumcount())
        rep_index = input_df.groupby(self.dataset_config.id_key).cumcount().astype(str)
        # Convert id_key to string (id can be integer) if needed and update IDs
        input_df[self.dataset_config.id_key] = input_df[self.dataset_config.id_key].astype(str) + "_rep" + rep_index
        # Ensure unique ID values after modification
        input_df.drop_duplicates(subset=[self.dataset_config.id_key], inplace=True)

        return input_df

    def get_eval_input_from_dataset(self, dataset: str) -> EvalInput:
        """Read the dataset, apply filters/dedup/repetitions, and convert it to an EvalInput.

        Args:
            dataset: Optional dataset file path from the command line; when non-empty
                it overrides the configured dataset (parsed as JSON).
        """
        # if a dataset file has been provided in the command line, use that
        dataset_config = EvalDatasetJsonConfig(file_path=dataset) if dataset else self.dataset_config

        # Download the dataset if it is remote
        downloader = DatasetDownloader(dataset_config=dataset_config)
        downloader.download_dataset()

        parser, kwargs = dataset_config.parser()
        # Parse the dataset into a DataFrame
        input_df = parser(dataset_config.file_path, **kwargs)

        # Apply filters and deduplicate
        input_df = self.dataset_filter.apply_filters(input_df)
        input_df.drop_duplicates(subset=[self.dataset_config.id_key], inplace=True)

        # If more than one repetition is needed, replicate the rows
        if self.reps > 1:
            input_df = self.setup_reps(input_df)

        # Convert the DataFrame to a list of EvalInput objects
        return self.get_eval_input_from_df(input_df)

    def filter_intermediate_steps(self, intermediate_steps: list[IntermediateStep]) -> list[dict]:
        """
        Filter out the intermediate steps that are not relevant for evaluation.

        The output is written with the intention of re-running the evaluation using the original config file.
        """
        filtered_steps = self.intermediate_step_adapter.filter_intermediate_steps(
            intermediate_steps, self.intermediate_step_adapter.DEFAULT_EVENT_FILTER)
        return self.intermediate_step_adapter.serialize_intermediate_steps(filtered_steps)

    def publish_eval_input(self, eval_input: EvalInput) -> str:
        """
        Convert the EvalInput object to a JSON output for storing in a file. Use the original keys to
        allow re-running evaluation using the original config file and '--skip_workflow' option.
        """
        indent = 2
        if self.is_structured_input():
            # Extract structured data from EvalInputItems
            data = [{
                self.id_key: item.id,
                self.question_key: item.input_obj,
                self.answer_key: item.expected_output_obj,
                self.generated_answer_key: item.output_obj,
                self.trajectory_key: self.filter_intermediate_steps(item.trajectory),
                self.expected_trajectory_key: self.filter_intermediate_steps(item.expected_trajectory),
            } for item in eval_input.eval_input_items]
        else:
            # Unstructured case: return only raw output objects as a JSON array
            # NOTE(review): assumes each output_obj is itself valid JSON text — confirm.
            data = [json.loads(item.output_obj) for item in eval_input.eval_input_items]

        # default=str stringifies anything json cannot serialize natively.
        return json.dumps(data, indent=indent, ensure_ascii=False, default=str)
|
aiq/eval/evaluate.py
ADDED
|
@@ -0,0 +1,322 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import asyncio
|
|
17
|
+
import logging
|
|
18
|
+
import shutil
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
from typing import Any
|
|
21
|
+
|
|
22
|
+
from pydantic import BaseModel
|
|
23
|
+
from tqdm import tqdm
|
|
24
|
+
|
|
25
|
+
from aiq.data_models.evaluate import EvalConfig
|
|
26
|
+
from aiq.eval.config import EvaluationRunConfig
|
|
27
|
+
from aiq.eval.config import EvaluationRunOutput
|
|
28
|
+
from aiq.eval.dataset_handler.dataset_handler import DatasetHandler
|
|
29
|
+
from aiq.eval.evaluator.evaluator_model import EvalInput
|
|
30
|
+
from aiq.eval.evaluator.evaluator_model import EvalInputItem
|
|
31
|
+
from aiq.eval.evaluator.evaluator_model import EvalOutput
|
|
32
|
+
from aiq.eval.utils.output_uploader import OutputUploader
|
|
33
|
+
from aiq.runtime.session import AIQSessionManager
|
|
34
|
+
|
|
35
|
+
logger = logging.getLogger(__name__)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class EvaluationRun: # pylint: disable=too-many-public-methods
|
|
39
|
+
"""
|
|
40
|
+
Instantiated for each evaluation run and used to store data for that single run.
|
|
41
|
+
"""
|
|
42
|
+
|
|
43
|
+
def __init__(self, config: EvaluationRunConfig):
    """
    Initialize an EvaluationRun with configuration.

    Args:
        config: Run-specific settings (config file, overrides, jsonpath, skip flags).
    """
    # Function-level import — presumably to avoid a circular import; confirm.
    from aiq.eval.intermediate_step_adapter import IntermediateStepAdapter

    # Run-specific configuration
    self.config: EvaluationRunConfig = config
    # Populated later from the validated AIQConfig (see apply_overrides).
    self.eval_config: EvalConfig | None = None

    # Helpers
    self.intermediate_step_adapter: IntermediateStepAdapter = IntermediateStepAdapter()

    # Metadata
    self.eval_input: EvalInput | None = None
    # Set when a workflow error aborts the run; used to warn that results may be partial.
    self.workflow_interrupted: bool = False

    # evaluation_results is list of tuples (evaluator_name, EvalOutput)
    self.evaluation_results: list[tuple[str, EvalOutput]] = []

    # workflow output file
    self.workflow_output_file: Path | None = None

    # evaluation output files
    self.evaluator_output_files: list[Path] = []
|
|
68
|
+
|
|
69
|
+
async def run_workflow_local(self, session_manager: AIQSessionManager):
    '''
    Launch the workflow with the specified questions and extract the output using the jsonpath
    '''
    # import function level dependencies
    from jsonpath_ng import parse

    from aiq.eval.runtime_event_subscriber import pull_intermediate

    # Run the workflow
    jsonpath_expr = parse(self.config.result_json_path)
    # Once set, all not-yet-started items are skipped (first workflow error aborts the pass).
    stop_event = asyncio.Event()

    async def run_one(item: EvalInputItem):
        # Run a single eval item through the workflow, filling item.output_obj and
        # item.trajectory in place. The return value is unused by the caller.
        if stop_event.is_set():
            # NOTE(review): this "", [] and the bare `return` in the error path below are
            # inconsistent return values, but harmless since nothing consumes them.
            return "", []

        async with session_manager.run(item.input_obj) as runner:
            try:
                # Start usage stats and intermediate steps collection in parallel
                intermediate_future = pull_intermediate()

                if session_manager.workflow.has_single_output:
                    base_output = await runner.result()
                else:
                    # raise an error if the workflow has multiple outputs
                    raise NotImplementedError("Multiple outputs are not supported")
                intermediate_steps = await intermediate_future
            except NotImplementedError as e:
                # raise original error
                raise e
            except Exception as e:
                logger.exception("Failed to run the workflow: %s", e, exc_info=True)
                # stop processing if a workflow error occurs
                self.workflow_interrupted = True
                stop_event.set()
                return

            # Best-effort conversion to str; keep the raw output when no converter applies.
            try:
                base_output = runner.convert(base_output, to_type=str)
            except ValueError:
                pass

            # if base_output is a pydantic model dump it to json
            if isinstance(base_output, BaseModel):
                output = base_output.model_dump_json(indent=2)
            else:
                m = jsonpath_expr.find(base_output)
                if (not m):
                    raise RuntimeError(f"Failed to extract output using jsonpath: {self.config.result_json_path}")
                if (len(m) > 1):
                    logger.warning("Multiple matches found for jsonpath at row '%s'. Matches: %s. Using the first",
                                   base_output,
                                   m)
                output = m[0].value

            item.output_obj = output
            item.trajectory = self.intermediate_step_adapter.validate_intermediate_steps(intermediate_steps)

    async def wrapped_run(item: EvalInputItem) -> None:
        # Advance the progress bar even for skipped or failed items.
        # (pbar is captured from the enclosing scope; it is assigned before gather runs.)
        await run_one(item)
        pbar.update(1)

    # if self.config.skip_complete is set skip eval_input_items with a non-empty output_obj
    if self.config.skip_completed_entries:
        eval_input_items = [item for item in self.eval_input.eval_input_items if not item.output_obj]
        if not eval_input_items:
            logger.warning("All items have a non-empty output. Skipping workflow pass altogether.")
            return
    else:
        eval_input_items = self.eval_input.eval_input_items
    pbar = tqdm(total=len(eval_input_items), desc="Running workflow")
    await asyncio.gather(*[wrapped_run(item) for item in eval_input_items])
    pbar.close()
|
|
143
|
+
|
|
144
|
+
async def run_workflow_remote(self):
    """Delegate the workflow pass to a remote endpoint handler.

    The handler runs every item in self.eval_input against the remote workflow,
    bounded by the configured max concurrency.
    """
    from aiq.eval.remote_workflow import EvaluationRemoteWorkflowHandler

    remote_handler = EvaluationRemoteWorkflowHandler(self.config, self.eval_config.general.max_concurrency)
    await remote_handler.run_workflow_remote(self.eval_input)
|
|
148
|
+
|
|
149
|
+
async def profile_workflow(self):
    """
    Profile a dataset.

    Collects the recorded trajectory (list of intermediate steps) of every
    evaluated item and hands them to the ProfilerRunner, which writes its
    results into the configured output directory. No-op (with an info log)
    when no profiler is configured.
    """

    if not self.eval_config.general.profiler:
        logger.info("Profiler is not enabled. Skipping profiling.")
        return

    from aiq.profiler.profile_runner import ProfilerRunner

    # One trajectory per evaluated item; a comprehension replaces the manual
    # append loop of the original (idiom/PERF401).
    all_stats = [input_item.trajectory for input_item in self.eval_input.eval_input_items]

    profiler_runner = ProfilerRunner(self.eval_config.general.profiler, self.eval_config.general.output_dir)

    await profiler_runner.run(all_stats)
|
|
167
|
+
|
|
168
|
+
def cleanup_output_directory(self):
    """Remove the configured output directory (and all of its contents) if it exists."""
    output_settings = self.eval_config.general.output
    if not output_settings:
        return
    output_dir = output_settings.dir
    if not output_dir or not output_dir.exists():
        return
    logger.info("Cleaning up output directory %s", output_dir)
    shutil.rmtree(output_dir)
|
|
174
|
+
|
|
175
|
+
def write_output(self, dataset_handler: DatasetHandler):
    """Write workflow and evaluator outputs to the configured output directory.

    Writes workflow_output.json (re-usable to re-run evaluation with
    --skip_completed_entries) plus one <evaluator_name>_output.json per entry in
    self.evaluation_results, recording the written paths on self. Warns at the
    end if the workflow pass was interrupted and results may be partial.

    Args:
        dataset_handler: Handler used to serialize self.eval_input with the
            dataset's original keys.
    """
    workflow_output_file = self.eval_config.general.output_dir / "workflow_output.json"
    workflow_output_file.parent.mkdir(parents=True, exist_ok=True)

    # Write the workflow output to a file (this can be used for re-running the evaluation)
    workflow_output = dataset_handler.publish_eval_input(self.eval_input)
    with open(workflow_output_file, "w", encoding="utf-8") as f:
        # set indent to 2 for pretty printing
        f.write(workflow_output)
    self.workflow_output_file = workflow_output_file
    logger.info("Workflow output written to %s", workflow_output_file)

    # Write the output of each evaluator to a separate json file
    for evaluator_name, eval_output in self.evaluation_results:
        output_file = self.eval_config.general.output_dir / f"{evaluator_name}_output.json"
        output_file.parent.mkdir(parents=True, exist_ok=True)
        # create json content using the evaluation results
        output = eval_output.model_dump_json(indent=2)
        with open(output_file, "w", encoding="utf-8") as f:
            f.write(output)
        self.evaluator_output_files.append(output_file)
        logger.info("Evaluation results written to %s", output_file)

    if self.workflow_interrupted:
        # Issue a warning if the workflow was not completed on all datasets
        msg = ("Workflow execution was interrupted due to an error. The results may be incomplete. "
               "You can re-execute evaluation for incomplete results by running "
               "`eval` with the --skip_completed_entries flag.")
        logger.warning(msg)
|
|
204
|
+
|
|
205
|
+
async def run_single_evaluator(self, evaluator_name: str, evaluator: Any):
    """Run a single evaluator and store its results.

    Appends ``(evaluator_name, eval_output)`` to ``self.evaluation_results``
    on success. Failures are logged with a traceback and swallowed so that
    one broken evaluator does not abort the others running concurrently.
    """
    try:
        eval_output = await evaluator.evaluate_fn(self.eval_input)
        self.evaluation_results.append((evaluator_name, eval_output))
    except Exception as e:
        # logger.exception always records the traceback; the explicit
        # exc_info=True in the original call was redundant.
        logger.exception("An error occurred while running evaluator %s: %s", evaluator_name, e)
|
|
212
|
+
|
|
213
|
+
async def run_evaluators(self, evaluators: dict[str, Any]):
    """Run all configured evaluators concurrently.

    Falsy entries (e.g. evaluators that failed to build) are skipped.
    Individual evaluator failures are handled inside
    ``run_single_evaluator``; anything that still escapes
    ``asyncio.gather`` is logged and re-raised.
    """
    tasks = [self.run_single_evaluator(name, evaluator) for name, evaluator in evaluators.items() if evaluator]

    if not tasks:
        logger.warning("All evaluators were empty or invalid.")
        return

    try:
        await asyncio.gather(*tasks)
    except Exception as e:
        # logger.exception always records the traceback; the explicit
        # exc_info=True in the original call was redundant.
        logger.exception("An error occurred while running evaluators: %s", e)
        raise
|
|
226
|
+
|
|
227
|
+
def apply_overrides(self):
    """Load the config file, apply CLI overrides, and validate the result.

    Returns the validated ``AIQConfig`` built from ``self.config.config_file``
    with ``self.config.override`` applied on top.
    """
    from aiq.cli.cli_utils.config_override import load_and_override_config
    from aiq.data_models.config import AIQConfig
    from aiq.runtime.loader import PluginTypes
    from aiq.runtime.loader import discover_and_register_plugins
    from aiq.utils.data_models.schema_validator import validate_schema

    # Plugins must be registered before validation so that plugin-provided
    # config sections are known to the schema.
    discover_and_register_plugins(PluginTypes.CONFIG_OBJECT)

    overridden = load_and_override_config(self.config.config_file, self.config.override)
    return validate_schema(overridden, AIQConfig)
|
|
240
|
+
|
|
241
|
+
async def run_and_evaluate(self,
                           session_manager: AIQSessionManager | None = None,
                           job_id: str | None = None) -> EvaluationRunOutput:
    """
    Run the workflow with the specified config file and evaluate the dataset.

    Args:
        session_manager: Reused when provided; otherwise one is built from
            the workflow with the configured max concurrency.
        job_id: When given, all outputs are written under a per-job
            subdirectory (``jobs/<job_id>``) of the configured output dir.

    Returns:
        EvaluationRunOutput holding the paths of everything written and
        whether the workflow run was interrupted.
    """
    logger.info("Starting evaluation run with config file: %s", self.config.config_file)

    # NOTE(review): imported lazily here rather than at module top —
    # presumably to keep module import cheap or avoid a cycle; confirm.
    from aiq.builder.eval_builder import WorkflowEvalBuilder
    from aiq.runtime.loader import load_config

    # Load and override the config
    if self.config.override:
        config = self.apply_overrides()
    else:
        config = load_config(self.config.config_file)
    self.eval_config = config.eval
    logger.debug("Loaded evaluation configuration: %s", self.eval_config)

    # Cleanup the output directory (only when configured to do so)
    if self.eval_config.general.output and self.eval_config.general.output.cleanup:
        self.cleanup_output_directory()

    # If a job id is provided keep the data per-job
    if job_id:
        self.eval_config.general.output_dir = self.eval_config.general.output_dir / f"jobs/{job_id}"
        # keep the output config's dir in sync with the general output_dir
        if self.eval_config.general.output:
            self.eval_config.general.output.dir = self.eval_config.general.output_dir

    # Load the input dataset
    # For multiple datasets, one handler per dataset can be created
    dataset_config = self.eval_config.general.dataset  # Currently only one dataset is supported
    if not dataset_config:
        # Nothing to do: return with whatever state has been accumulated
        logger.info("No dataset found, nothing to evaluate")
        return EvaluationRunOutput(
            workflow_output_file=self.workflow_output_file,
            evaluator_output_files=self.evaluator_output_files,
            workflow_interrupted=self.workflow_interrupted,
        )

    dataset_handler = DatasetHandler(dataset_config=dataset_config, reps=self.config.reps)
    self.eval_input = dataset_handler.get_eval_input_from_dataset(self.config.dataset)
    if not self.eval_input.eval_input_items:
        logger.info("Dataset is empty. Nothing to evaluate.")
        return EvaluationRunOutput(
            workflow_output_file=self.workflow_output_file,
            evaluator_output_files=self.evaluator_output_files,
            workflow_interrupted=self.workflow_interrupted,
        )

    # Run workflow and evaluate
    async with WorkflowEvalBuilder.from_config(config=config) as eval_workflow:
        if self.config.endpoint:
            # A remote endpoint was supplied; run the workflow there
            await self.run_workflow_remote()
        else:
            if not self.config.skip_workflow:
                if session_manager is None:
                    session_manager = AIQSessionManager(eval_workflow.build(),
                                                        max_concurrency=self.eval_config.general.max_concurrency)
                await self.run_workflow_local(session_manager)

        # Evaluate
        evaluators = {name: eval_workflow.get_evaluator(name) for name in self.eval_config.evaluators}
        await self.run_evaluators(evaluators)

    # Profile the workflow
    await self.profile_workflow()

    # Write the results to the output directory
    self.write_output(dataset_handler)

    # Run custom scripts and upload evaluation outputs to S3
    if self.eval_config.general.output:
        output_uploader = OutputUploader(self.eval_config.general.output, job_id=job_id)
        output_uploader.run_custom_scripts()
        await output_uploader.upload_directory()

    return EvaluationRunOutput(
        workflow_output_file=self.workflow_output_file,
        evaluator_output_files=self.evaluator_output_files,
        workflow_interrupted=self.workflow_interrupted,
    )
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import typing
|
|
17
|
+
|
|
18
|
+
from pydantic import BaseModel
|
|
19
|
+
|
|
20
|
+
from aiq.data_models.intermediate_step import IntermediateStep
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class EvalInputItem(BaseModel):
    """A single dataset entry flowing through workflow execution and evaluation."""
    # Unique identifier for this entry (type depends on the dataset)
    id: typing.Any
    # The input handed to the workflow
    input_obj: typing.Any
    # Expected (ground-truth) output for this input — presumably set by the
    # dataset handler; confirm against the caller
    expected_output_obj: typing.Any
    # Output actually produced by the workflow run
    output_obj: typing.Any
    # Reference sequence of intermediate steps for this entry
    expected_trajectory: list[IntermediateStep]
    # Intermediate steps actually recorded during the run
    trajectory: list[IntermediateStep]
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class EvalInput(BaseModel):
    """Container for the full set of items passed to an evaluator."""
    # All entries to evaluate; may be empty (callers treat that as nothing to do)
    eval_input_items: list[EvalInputItem]
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class EvalOutputItem(BaseModel):
    """Evaluation result for a single input item."""
    # id or input_obj from EvalInputItem
    id: typing.Any
    # float or any serializable type
    score: typing.Any
    # Evaluator-specific explanation of how the score was derived
    reasoning: typing.Any
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class EvalOutput(BaseModel):
    """Aggregate result produced by one evaluator over a whole EvalInput."""
    # float or any serializable type
    average_score: typing.Any
    # Per-item results backing the average
    eval_output_items: list[EvalOutputItem]
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import logging
|
|
17
|
+
|
|
18
|
+
from langchain_core.agents import AgentAction
|
|
19
|
+
|
|
20
|
+
from aiq.data_models.intermediate_step import IntermediateStep
|
|
21
|
+
from aiq.data_models.intermediate_step import IntermediateStepType
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger(__name__)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class IntermediateStepAdapter:
    """Converts AIQ ``IntermediateStep`` records into LangChain-style agent
    actions and retrieved-context strings for use by evaluators."""

    # Default events of interest: final LLM outputs and completed tool calls.
    DEFAULT_EVENT_FILTER = [IntermediateStepType.LLM_END, IntermediateStepType.TOOL_END]

    def filter_intermediate_steps(self,
                                  intermediate_steps: list[IntermediateStep],
                                  event_filter: list[IntermediateStepType]) -> list[IntermediateStep]:
        """Return only the steps whose event type is in ``event_filter``.

        An empty/falsy filter means "no filtering": the input list is
        returned unchanged.
        """
        if not event_filter:
            return intermediate_steps
        return [step for step in intermediate_steps if step.event_type in event_filter]

    def validate_intermediate_steps(self, intermediate_steps: list[dict]) -> list[IntermediateStep]:
        """Validate raw step dicts into ``IntermediateStep`` models.

        Steps that fail validation are logged with a traceback and skipped,
        so one malformed record does not discard the rest.
        """
        validated_steps = []
        for step_data in intermediate_steps:
            try:
                validated_steps.append(IntermediateStep.model_validate(step_data))
            except Exception as e:
                # logger.exception always records the traceback; the explicit
                # exc_info=True in the original call was redundant.
                logger.exception("Validation failed for step: %r, Error: %s", step_data, e)
        return validated_steps

    def serialize_intermediate_steps(self, intermediate_steps: list[IntermediateStep]) -> list[dict]:
        """Converts a list of IntermediateStep objects to a list of dictionaries."""
        return [step.model_dump() for step in intermediate_steps]

    @staticmethod
    def agent_action_to_dict(action) -> dict:
        """Convert AgentAction to a JSON-serializable dictionary."""
        return {
            "tool": action.tool,
            "tool_input": action.tool_input,
            "log": action.log,
            "type": action.type,
        }

    def get_agent_action_single(self, step: IntermediateStep,
                                last_llm_end_step: IntermediateStep | None) -> tuple[AgentAction, str]:
        """Converts a single tool step to ``(AgentAction, tool output)``.

        The preceding LLM_END step's output (when available) is used as the
        action's ``log``, pairing the LLM decision with the tool call it
        triggered.
        """
        # use the previous llm output as log
        log = getattr(last_llm_end_step.data, "output", "") if last_llm_end_step else ""
        tool_name = step.name or ""
        tool_input = getattr(step.data, "input", "") if step.data else ""
        tool_output = getattr(step.data, "output", "") if step.data else ""

        action = AgentAction(tool=tool_name, tool_input=tool_input, log=log)

        return action, tool_output

    def get_agent_actions(self, intermediate_steps: list[IntermediateStep],
                          event_filter: list[IntermediateStepType]) -> list[tuple[AgentAction, str]]:
        """Converts a list of intermediate steps to a list of (AgentAction, output).

        LLM_END steps are not emitted themselves; the most recent one is
        remembered and attached as the ``log`` of each subsequent tool step.
        """
        steps = self.filter_intermediate_steps(intermediate_steps, event_filter)
        last_llm_end_step = None
        agent_actions = []
        for step in steps:
            if step.event_type == IntermediateStepType.LLM_END:
                last_llm_end_step = step
            else:
                agent_actions.append(self.get_agent_action_single(step, last_llm_end_step))

        return agent_actions

    def get_context(self, intermediate_steps: list[IntermediateStep]) -> list[str]:
        """Grab the output of all the tools and return them as retrieved context."""
        return [
            str(step.data.output) for step in intermediate_steps
            if step.event_type == IntermediateStepType.TOOL_END and step.data and step.data.output
        ]
|
File without changes
|