aiqtoolkit 1.2.0.dev0__py3-none-any.whl → 1.2.0rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aiqtoolkit might be problematic. Click here for more details.
- aiq/agent/base.py +170 -8
- aiq/agent/dual_node.py +1 -1
- aiq/agent/react_agent/agent.py +146 -112
- aiq/agent/react_agent/prompt.py +1 -6
- aiq/agent/react_agent/register.py +36 -35
- aiq/agent/rewoo_agent/agent.py +36 -35
- aiq/agent/rewoo_agent/register.py +2 -2
- aiq/agent/tool_calling_agent/agent.py +3 -7
- aiq/agent/tool_calling_agent/register.py +1 -1
- aiq/authentication/__init__.py +14 -0
- aiq/authentication/api_key/__init__.py +14 -0
- aiq/authentication/api_key/api_key_auth_provider.py +92 -0
- aiq/authentication/api_key/api_key_auth_provider_config.py +124 -0
- aiq/authentication/api_key/register.py +26 -0
- aiq/authentication/exceptions/__init__.py +14 -0
- aiq/authentication/exceptions/api_key_exceptions.py +38 -0
- aiq/authentication/exceptions/auth_code_grant_exceptions.py +86 -0
- aiq/authentication/exceptions/call_back_exceptions.py +38 -0
- aiq/authentication/exceptions/request_exceptions.py +54 -0
- aiq/authentication/http_basic_auth/__init__.py +0 -0
- aiq/authentication/http_basic_auth/http_basic_auth_provider.py +81 -0
- aiq/authentication/http_basic_auth/register.py +30 -0
- aiq/authentication/interfaces.py +93 -0
- aiq/authentication/oauth2/__init__.py +14 -0
- aiq/authentication/oauth2/oauth2_auth_code_flow_provider.py +107 -0
- aiq/authentication/oauth2/oauth2_auth_code_flow_provider_config.py +39 -0
- aiq/authentication/oauth2/register.py +25 -0
- aiq/authentication/register.py +21 -0
- aiq/builder/builder.py +64 -2
- aiq/builder/component_utils.py +16 -3
- aiq/builder/context.py +37 -0
- aiq/builder/eval_builder.py +43 -2
- aiq/builder/function.py +44 -12
- aiq/builder/function_base.py +1 -1
- aiq/builder/intermediate_step_manager.py +6 -8
- aiq/builder/user_interaction_manager.py +3 -0
- aiq/builder/workflow.py +23 -18
- aiq/builder/workflow_builder.py +421 -61
- aiq/cli/commands/info/list_mcp.py +103 -16
- aiq/cli/commands/sizing/__init__.py +14 -0
- aiq/cli/commands/sizing/calc.py +294 -0
- aiq/cli/commands/sizing/sizing.py +27 -0
- aiq/cli/commands/start.py +2 -1
- aiq/cli/entrypoint.py +2 -0
- aiq/cli/register_workflow.py +80 -0
- aiq/cli/type_registry.py +151 -30
- aiq/data_models/api_server.py +124 -12
- aiq/data_models/authentication.py +231 -0
- aiq/data_models/common.py +35 -7
- aiq/data_models/component.py +17 -9
- aiq/data_models/component_ref.py +33 -0
- aiq/data_models/config.py +60 -3
- aiq/data_models/dataset_handler.py +2 -1
- aiq/data_models/embedder.py +1 -0
- aiq/data_models/evaluate.py +23 -0
- aiq/data_models/function_dependencies.py +8 -0
- aiq/data_models/interactive.py +10 -1
- aiq/data_models/intermediate_step.py +38 -5
- aiq/data_models/its_strategy.py +30 -0
- aiq/data_models/llm.py +1 -0
- aiq/data_models/memory.py +1 -0
- aiq/data_models/object_store.py +44 -0
- aiq/data_models/profiler.py +1 -0
- aiq/data_models/retry_mixin.py +35 -0
- aiq/data_models/span.py +187 -0
- aiq/data_models/telemetry_exporter.py +2 -2
- aiq/embedder/nim_embedder.py +2 -1
- aiq/embedder/openai_embedder.py +2 -1
- aiq/eval/config.py +19 -1
- aiq/eval/dataset_handler/dataset_handler.py +87 -2
- aiq/eval/evaluate.py +208 -27
- aiq/eval/evaluator/base_evaluator.py +73 -0
- aiq/eval/evaluator/evaluator_model.py +1 -0
- aiq/eval/intermediate_step_adapter.py +11 -5
- aiq/eval/rag_evaluator/evaluate.py +55 -15
- aiq/eval/rag_evaluator/register.py +6 -1
- aiq/eval/remote_workflow.py +7 -2
- aiq/eval/runners/__init__.py +14 -0
- aiq/eval/runners/config.py +39 -0
- aiq/eval/runners/multi_eval_runner.py +54 -0
- aiq/eval/trajectory_evaluator/evaluate.py +22 -65
- aiq/eval/tunable_rag_evaluator/evaluate.py +150 -168
- aiq/eval/tunable_rag_evaluator/register.py +2 -0
- aiq/eval/usage_stats.py +41 -0
- aiq/eval/utils/output_uploader.py +10 -1
- aiq/eval/utils/weave_eval.py +184 -0
- aiq/experimental/__init__.py +0 -0
- aiq/experimental/decorators/__init__.py +0 -0
- aiq/experimental/decorators/experimental_warning_decorator.py +130 -0
- aiq/experimental/inference_time_scaling/__init__.py +0 -0
- aiq/experimental/inference_time_scaling/editing/__init__.py +0 -0
- aiq/experimental/inference_time_scaling/editing/iterative_plan_refinement_editor.py +147 -0
- aiq/experimental/inference_time_scaling/editing/llm_as_a_judge_editor.py +204 -0
- aiq/experimental/inference_time_scaling/editing/motivation_aware_summarization.py +107 -0
- aiq/experimental/inference_time_scaling/functions/__init__.py +0 -0
- aiq/experimental/inference_time_scaling/functions/execute_score_select_function.py +105 -0
- aiq/experimental/inference_time_scaling/functions/its_tool_orchestration_function.py +205 -0
- aiq/experimental/inference_time_scaling/functions/its_tool_wrapper_function.py +146 -0
- aiq/experimental/inference_time_scaling/functions/plan_select_execute_function.py +224 -0
- aiq/experimental/inference_time_scaling/models/__init__.py +0 -0
- aiq/experimental/inference_time_scaling/models/editor_config.py +132 -0
- aiq/experimental/inference_time_scaling/models/its_item.py +48 -0
- aiq/experimental/inference_time_scaling/models/scoring_config.py +112 -0
- aiq/experimental/inference_time_scaling/models/search_config.py +120 -0
- aiq/experimental/inference_time_scaling/models/selection_config.py +154 -0
- aiq/experimental/inference_time_scaling/models/stage_enums.py +43 -0
- aiq/experimental/inference_time_scaling/models/strategy_base.py +66 -0
- aiq/experimental/inference_time_scaling/models/tool_use_config.py +41 -0
- aiq/experimental/inference_time_scaling/register.py +36 -0
- aiq/experimental/inference_time_scaling/scoring/__init__.py +0 -0
- aiq/experimental/inference_time_scaling/scoring/llm_based_agent_scorer.py +168 -0
- aiq/experimental/inference_time_scaling/scoring/llm_based_plan_scorer.py +168 -0
- aiq/experimental/inference_time_scaling/scoring/motivation_aware_scorer.py +111 -0
- aiq/experimental/inference_time_scaling/search/__init__.py +0 -0
- aiq/experimental/inference_time_scaling/search/multi_llm_planner.py +128 -0
- aiq/experimental/inference_time_scaling/search/multi_query_retrieval_search.py +122 -0
- aiq/experimental/inference_time_scaling/search/single_shot_multi_plan_planner.py +128 -0
- aiq/experimental/inference_time_scaling/selection/__init__.py +0 -0
- aiq/experimental/inference_time_scaling/selection/best_of_n_selector.py +63 -0
- aiq/experimental/inference_time_scaling/selection/llm_based_agent_output_selector.py +131 -0
- aiq/experimental/inference_time_scaling/selection/llm_based_output_merging_selector.py +159 -0
- aiq/experimental/inference_time_scaling/selection/llm_based_plan_selector.py +128 -0
- aiq/experimental/inference_time_scaling/selection/threshold_selector.py +58 -0
- aiq/front_ends/console/authentication_flow_handler.py +233 -0
- aiq/front_ends/console/console_front_end_plugin.py +11 -2
- aiq/front_ends/fastapi/auth_flow_handlers/__init__.py +0 -0
- aiq/front_ends/fastapi/auth_flow_handlers/http_flow_handler.py +27 -0
- aiq/front_ends/fastapi/auth_flow_handlers/websocket_flow_handler.py +107 -0
- aiq/front_ends/fastapi/fastapi_front_end_config.py +93 -9
- aiq/front_ends/fastapi/fastapi_front_end_controller.py +68 -0
- aiq/front_ends/fastapi/fastapi_front_end_plugin.py +14 -1
- aiq/front_ends/fastapi/fastapi_front_end_plugin_worker.py +537 -52
- aiq/front_ends/fastapi/html_snippets/__init__.py +14 -0
- aiq/front_ends/fastapi/html_snippets/auth_code_grant_success.py +35 -0
- aiq/front_ends/fastapi/job_store.py +47 -25
- aiq/front_ends/fastapi/main.py +2 -0
- aiq/front_ends/fastapi/message_handler.py +108 -89
- aiq/front_ends/fastapi/step_adaptor.py +2 -1
- aiq/llm/aws_bedrock_llm.py +57 -0
- aiq/llm/nim_llm.py +2 -1
- aiq/llm/openai_llm.py +3 -2
- aiq/llm/register.py +1 -0
- aiq/meta/pypi.md +12 -12
- aiq/object_store/__init__.py +20 -0
- aiq/object_store/in_memory_object_store.py +74 -0
- aiq/object_store/interfaces.py +84 -0
- aiq/object_store/models.py +36 -0
- aiq/object_store/register.py +20 -0
- aiq/observability/__init__.py +14 -0
- aiq/observability/exporter/__init__.py +14 -0
- aiq/observability/exporter/base_exporter.py +449 -0
- aiq/observability/exporter/exporter.py +78 -0
- aiq/observability/exporter/file_exporter.py +33 -0
- aiq/observability/exporter/processing_exporter.py +269 -0
- aiq/observability/exporter/raw_exporter.py +52 -0
- aiq/observability/exporter/span_exporter.py +264 -0
- aiq/observability/exporter_manager.py +335 -0
- aiq/observability/mixin/__init__.py +14 -0
- aiq/observability/mixin/batch_config_mixin.py +26 -0
- aiq/observability/mixin/collector_config_mixin.py +23 -0
- aiq/observability/mixin/file_mixin.py +288 -0
- aiq/observability/mixin/file_mode.py +23 -0
- aiq/observability/mixin/resource_conflict_mixin.py +134 -0
- aiq/observability/mixin/serialize_mixin.py +61 -0
- aiq/observability/mixin/type_introspection_mixin.py +183 -0
- aiq/observability/processor/__init__.py +14 -0
- aiq/observability/processor/batching_processor.py +316 -0
- aiq/observability/processor/intermediate_step_serializer.py +28 -0
- aiq/observability/processor/processor.py +68 -0
- aiq/observability/register.py +36 -39
- aiq/observability/utils/__init__.py +14 -0
- aiq/observability/utils/dict_utils.py +236 -0
- aiq/observability/utils/time_utils.py +31 -0
- aiq/profiler/calc/__init__.py +14 -0
- aiq/profiler/calc/calc_runner.py +623 -0
- aiq/profiler/calc/calculations.py +288 -0
- aiq/profiler/calc/data_models.py +176 -0
- aiq/profiler/calc/plot.py +345 -0
- aiq/profiler/callbacks/langchain_callback_handler.py +22 -10
- aiq/profiler/data_models.py +24 -0
- aiq/profiler/inference_metrics_model.py +3 -0
- aiq/profiler/inference_optimization/bottleneck_analysis/nested_stack_analysis.py +8 -0
- aiq/profiler/inference_optimization/data_models.py +2 -2
- aiq/profiler/inference_optimization/llm_metrics.py +2 -2
- aiq/profiler/profile_runner.py +61 -21
- aiq/runtime/loader.py +9 -3
- aiq/runtime/runner.py +23 -9
- aiq/runtime/session.py +25 -7
- aiq/runtime/user_metadata.py +2 -3
- aiq/tool/chat_completion.py +74 -0
- aiq/tool/code_execution/README.md +152 -0
- aiq/tool/code_execution/code_sandbox.py +151 -72
- aiq/tool/code_execution/local_sandbox/.gitignore +1 -0
- aiq/tool/code_execution/local_sandbox/local_sandbox_server.py +139 -24
- aiq/tool/code_execution/local_sandbox/sandbox.requirements.txt +3 -1
- aiq/tool/code_execution/local_sandbox/start_local_sandbox.sh +27 -2
- aiq/tool/code_execution/register.py +7 -3
- aiq/tool/code_execution/test_code_execution_sandbox.py +414 -0
- aiq/tool/mcp/exceptions.py +142 -0
- aiq/tool/mcp/mcp_client.py +41 -6
- aiq/tool/mcp/mcp_tool.py +3 -2
- aiq/tool/register.py +1 -0
- aiq/tool/server_tools.py +6 -3
- aiq/utils/exception_handlers/automatic_retries.py +289 -0
- aiq/utils/exception_handlers/mcp.py +211 -0
- aiq/utils/io/model_processing.py +28 -0
- aiq/utils/log_utils.py +37 -0
- aiq/utils/string_utils.py +38 -0
- aiq/utils/type_converter.py +18 -2
- aiq/utils/type_utils.py +87 -0
- {aiqtoolkit-1.2.0.dev0.dist-info → aiqtoolkit-1.2.0rc2.dist-info}/METADATA +53 -21
- aiqtoolkit-1.2.0rc2.dist-info/RECORD +436 -0
- {aiqtoolkit-1.2.0.dev0.dist-info → aiqtoolkit-1.2.0rc2.dist-info}/WHEEL +1 -1
- {aiqtoolkit-1.2.0.dev0.dist-info → aiqtoolkit-1.2.0rc2.dist-info}/entry_points.txt +3 -0
- aiq/front_ends/fastapi/websocket.py +0 -148
- aiq/observability/async_otel_listener.py +0 -429
- aiqtoolkit-1.2.0.dev0.dist-info/RECORD +0 -316
- {aiqtoolkit-1.2.0.dev0.dist-info → aiqtoolkit-1.2.0rc2.dist-info}/licenses/LICENSE-3rd-party.txt +0 -0
- {aiqtoolkit-1.2.0.dev0.dist-info → aiqtoolkit-1.2.0rc2.dist-info}/licenses/LICENSE.md +0 -0
- {aiqtoolkit-1.2.0.dev0.dist-info → aiqtoolkit-1.2.0rc2.dist-info}/top_level.txt +0 -0
aiq/eval/usage_stats.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import typing
|
|
17
|
+
|
|
18
|
+
from pydantic import BaseModel
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class UsageStatsLLM(BaseModel):
    """Token counts accumulated for a single LLM during an evaluation run."""
    # Tokens consumed by prompts sent to this LLM.
    prompt_tokens: int = 0
    # Tokens produced by this LLM's completions.
    completion_tokens: int = 0
    # Presumably prompt_tokens + completion_tokens; populated by the caller — confirm.
    total_tokens: int = 0
+
|
|
26
|
+
|
|
27
|
+
class UsageStatsItem(BaseModel):
    """Usage statistics for one evaluated item (one ``EvalInputItem``)."""
    # Per-LLM token usage, keyed by LLM name.
    usage_stats_per_llm: dict[str, UsageStatsLLM]
    # Total tokens across all LLMs for this item; ``None`` when unknown.
    total_tokens: int | None = None
    # Wall-clock runtime for this item, in seconds — presumably; confirm units.
    runtime: float = 0.0
    # Earliest event timestamp observed for this item (assumed epoch seconds — confirm).
    min_timestamp: float = 0.0
    # Latest event timestamp observed for this item (assumed epoch seconds — confirm).
    max_timestamp: float = 0.0
    # Cumulative LLM call latency for this item.
    llm_latency: float = 0.0
+
|
|
35
|
+
|
|
36
|
+
class UsageStats(BaseModel):
    """Aggregate usage statistics across an entire evaluation run."""
    # Earliest timestamp across all items (assumed epoch seconds — confirm).
    min_timestamp: float = 0.0
    # Latest timestamp across all items (assumed epoch seconds — confirm).
    max_timestamp: float = 0.0
    # Total runtime of the whole run, in seconds.
    total_runtime: float = 0.0
    # key is the id or input_obj from EvalInputItem
    # NOTE(review): the mutable ``{}`` default is safe under pydantic, which
    # copies field defaults per instance (unlike plain Python defaults).
    usage_stats_items: dict[typing.Any, UsageStatsItem] = {}
|
|
@@ -78,9 +78,18 @@ class OutputUploader:
|
|
|
78
78
|
|
|
79
79
|
session = aioboto3.Session()
|
|
80
80
|
try:
|
|
81
|
+
if self.s3_config.endpoint_url:
|
|
82
|
+
region_name = None
|
|
83
|
+
endpoint_url = self.s3_config.endpoint_url
|
|
84
|
+
elif self.s3_config.region_name:
|
|
85
|
+
region_name = self.s3_config.region_name
|
|
86
|
+
endpoint_url = None
|
|
87
|
+
else:
|
|
88
|
+
raise ValueError("No endpoint_url or region_name provided in the config: eval.general.output.s3")
|
|
81
89
|
async with session.client(
|
|
82
90
|
"s3",
|
|
83
|
-
endpoint_url=
|
|
91
|
+
endpoint_url=endpoint_url,
|
|
92
|
+
region_name=region_name,
|
|
84
93
|
aws_access_key_id=self.s3_config.access_key,
|
|
85
94
|
aws_secret_access_key=self.s3_config.secret_key,
|
|
86
95
|
) as s3_client:
|
|
@@ -0,0 +1,184 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import asyncio
|
|
17
|
+
import logging
|
|
18
|
+
from typing import Any
|
|
19
|
+
|
|
20
|
+
from aiq.eval.evaluator.evaluator_model import EvalInput
|
|
21
|
+
from aiq.eval.evaluator.evaluator_model import EvalInputItem
|
|
22
|
+
from aiq.eval.evaluator.evaluator_model import EvalOutput
|
|
23
|
+
from aiq.eval.usage_stats import UsageStats
|
|
24
|
+
from aiq.eval.usage_stats import UsageStatsItem
|
|
25
|
+
from aiq.profiler.data_models import ProfilerResults
|
|
26
|
+
|
|
27
|
+
logger = logging.getLogger(__name__)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class WeaveEvaluationIntegration:  # pylint: disable=too-many-public-methods
    """
    Class to handle all Weave integration functionality.

    Every method degrades to a no-op when the optional ``weave`` package is
    not installed, or when a Weave client / evaluation logger could not be
    initialized, so callers never need to guard on availability themselves.
    """

    def __init__(self):
        # True only when the optional ``weave`` package imported successfully.
        self.available = False
        # Lazily-initialized Weave client (see initialize_client()).
        self.client = None
        # Weave EvaluationLogger; None until initialize_logger() succeeds.
        self.eval_logger = None
        # Per-item prediction loggers, keyed by EvalInputItem.id.
        self.pred_loggers = {}

        try:
            from weave.flow.eval_imperative import EvaluationLogger
            from weave.flow.eval_imperative import ScoreLogger
            from weave.trace.context import weave_client_context
            self.EvaluationLogger = EvaluationLogger
            self.ScoreLogger = ScoreLogger
            self.weave_client_context = weave_client_context
            self.available = True
        except ImportError:
            # we simply don't do anything if weave is not available
            self.available = False

    def initialize_client(self) -> bool:
        """Initialize the Weave client if available.

        Returns:
            bool: True when a client was obtained, False otherwise.
        """
        if not self.available:
            return False

        try:
            self.client = self.weave_client_context.require_weave_client()
            return self.client is not None
        except Exception:
            # require_weave_client() raises when no weave run is active.
            self.client = None
            return False

    def _get_prediction_inputs(self, item: EvalInputItem):
        """Get the inputs for displaying in the UI.

        The following fields are excluded as they are too large to display in the UI:
        - full_dataset_entry
        - expected_trajectory
        - trajectory

        output_obj is excluded because it is displayed separately.
        """
        include = {"id", "input_obj", "expected_output_obj"}
        return item.model_dump(include=include)

    def _get_weave_dataset(self, eval_input: EvalInput):
        """Get the full dataset for Weave."""
        return [item.full_dataset_entry for item in eval_input.eval_input_items]

    def initialize_logger(self, workflow_alias: str, eval_input: EvalInput, config: Any) -> bool:
        """Initialize the Weave evaluation logger.

        Args:
            workflow_alias: Display name used for the logged model.
            eval_input: Items whose dataset entries form the Weave dataset.
            config: Pydantic config object serialized as the model payload.

        Returns:
            bool: True when the logger was created, False otherwise.
        """
        if not self.client and not self.initialize_client():
            # lazy init the client
            return False

        try:
            weave_dataset = self._get_weave_dataset(eval_input)
            config_dict = config.model_dump(mode="json")
            config_dict["name"] = workflow_alias
            self.eval_logger = self.EvaluationLogger(model=config_dict, dataset=weave_dataset)
            # Reset any loggers from a previous evaluation run.
            self.pred_loggers = {}

            return True
        except Exception as e:
            self.eval_logger = None
            logger.warning("Failed to initialize Weave `EvaluationLogger`: %s", e)

            return False

    def log_prediction(self, item: EvalInputItem, output: Any):
        """Log a prediction to Weave."""
        if not self.eval_logger:
            return

        pred_logger = self.eval_logger.log_prediction(inputs=self._get_prediction_inputs(item), output=output)
        self.pred_loggers[item.id] = pred_logger

    async def log_usage_stats(self, item: EvalInputItem, usage_stats_item: UsageStatsItem):
        """Log usage stats to Weave."""
        if not self.eval_logger:
            return

        # Robustness fix: guard against items for which log_prediction() was
        # never called; direct indexing raised KeyError in that case.
        pred_logger = self.pred_loggers.get(item.id)
        if pred_logger is None:
            return

        # log each usage stat as a score
        await pred_logger.alog_score(scorer="wf_runtime", score=usage_stats_item.runtime)

        # log the total tokens for this item, per-llm tokens can be exported later if needed
        await pred_logger.alog_score(scorer="wf_tokens", score=usage_stats_item.total_tokens)

    async def alog_score(self, eval_output: EvalOutput, evaluator_name: str):
        """Log scores for evaluation outputs."""
        if not self.eval_logger:
            return

        # Create coroutines for all score logging operations
        coros = []
        for eval_output_item in eval_output.eval_output_items:
            if eval_output_item.id in self.pred_loggers:
                coros.append(self.pred_loggers[eval_output_item.id].alog_score(
                    scorer=evaluator_name,
                    score=eval_output_item.score,
                ))

        # Execute all coroutines concurrently
        if coros:
            await asyncio.gather(*coros)

    async def afinish_loggers(self):
        """Finish all prediction loggers that have not been finished yet."""
        if not self.eval_logger:
            return

        async def _finish_one(pred_logger):
            # Bug fix: the previous condition was inverted — it returned early
            # for loggers that had NOT finished (skipping them) and called
            # finish() again on already-finished ones. Skip finished loggers.
            if getattr(pred_logger, '_has_finished', False):
                return
            # run the *blocking* finish() in a thread so we don't nest loops
            await asyncio.to_thread(pred_logger.finish)

        await asyncio.gather(*[_finish_one(pl) for pl in self.pred_loggers.values()])

    def _log_profiler_metrics(self, profiler_results: ProfilerResults, usage_stats: UsageStats) -> dict[str, Any]:
        """Collect profiler metrics into a summary dict for Weave."""
        profile_metrics = {}
        if profiler_results.llm_latency_ci:
            profile_metrics["llm_latency_p95"] = profiler_results.llm_latency_ci.p95
        if profiler_results.workflow_runtime_metrics:
            profile_metrics["wf_runtime_p95"] = profiler_results.workflow_runtime_metrics.p95

        # TODO:get the LLM tokens from the usage stats and log them
        profile_metrics["total_runtime"] = usage_stats.total_runtime

        return profile_metrics

    def log_summary(self,
                    usage_stats: UsageStats,
                    evaluation_results: list[tuple[str, EvalOutput]],
                    profiler_results: ProfilerResults):
        """Log summary statistics to Weave.

        Args:
            usage_stats: Aggregate run statistics (contributes total_runtime).
            evaluation_results: ``(evaluator_name, EvalOutput)`` pairs; each
                evaluator's average score goes into the summary.
            profiler_results: Source of p95 latency/runtime metrics.
        """
        if not self.eval_logger:
            return

        summary = {}
        # add evaluation results to the summary
        for evaluator_name, eval_output in evaluation_results:
            summary[evaluator_name] = eval_output.average_score

        # add profiler metrics to the summary
        profile_metrics = self._log_profiler_metrics(profiler_results, usage_stats)
        summary.update(profile_metrics)

        # Log the summary to finish the evaluation, disable auto-summarize
        # as we will be adding profiler metrics to the summary
        self.eval_logger.log_summary(summary, auto_summarize=False)
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,130 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import functools
|
|
17
|
+
import inspect
|
|
18
|
+
import logging
|
|
19
|
+
from typing import Any
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
BASE_WARNING_MESSAGE = ("is experimental and the API may change in future releases. "
|
|
24
|
+
"Future versions may introduce breaking changes without notice.")
|
|
25
|
+
|
|
26
|
+
_warning_issued = set()
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def issue_experimental_warning(function_name: str,
|
|
30
|
+
feature_name: str | None = None,
|
|
31
|
+
metadata: dict[str, Any] | None = None):
|
|
32
|
+
"""
|
|
33
|
+
Log a warning message that the function is experimental.
|
|
34
|
+
|
|
35
|
+
A warning is emitted only once per function. When a ``metadata`` dict
|
|
36
|
+
is supplied, it is appended to the log entry to provide extra context
|
|
37
|
+
(e.g., version, author, feature flag).
|
|
38
|
+
"""
|
|
39
|
+
if function_name not in _warning_issued:
|
|
40
|
+
if (feature_name):
|
|
41
|
+
warning_message = f"The {feature_name} feature {BASE_WARNING_MESSAGE}"
|
|
42
|
+
else:
|
|
43
|
+
warning_message = f"This function {BASE_WARNING_MESSAGE}"
|
|
44
|
+
|
|
45
|
+
warning_message += f" Function: {function_name}"
|
|
46
|
+
|
|
47
|
+
if (metadata):
|
|
48
|
+
warning_message += f" | Metadata: {metadata}"
|
|
49
|
+
|
|
50
|
+
# Issue warning and save function name to avoid duplicate warnings
|
|
51
|
+
logger.warning(warning_message)
|
|
52
|
+
|
|
53
|
+
_warning_issued.add(function_name)
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def aiq_experimental(func: Any = None, *, feature_name: str | None = None, metadata: dict[str, Any] | None = None):
    """
    Decorator that can wrap any type of function (sync, async, generator,
    async generator) and logs a warning that the function is experimental.

    Args:
        func: The function to be decorated.
        feature_name: Optional name of the feature that is experimental. If provided, the warning will be
                      prefixed with "The <feature_name> feature is experimental".
        metadata: Optional dictionary of metadata to log with the warning. This can include information
                  like version, author, etc. If provided, the metadata will be
                  logged alongside the experimental warning.
    """
    # Used as ``@aiq_experimental(...)``: defer until the real function arrives.
    if func is None:
        return functools.partial(aiq_experimental, feature_name=feature_name, metadata=metadata)

    function_name: str = f"{func.__module__}.{func.__qualname__}"

    # --- Validate metadata ---
    if metadata is not None:
        if not isinstance(metadata, dict):
            raise TypeError("metadata must be a dict[str, Any].")
        if any(not isinstance(k, str) for k in metadata.keys()):
            raise TypeError("All metadata keys must be strings.")

    # --- Dispatch on the kind of callable and wrap accordingly ---
    if inspect.isasyncgenfunction(func):
        # Async generator: re-yield every item after warning once.

        @functools.wraps(func)
        def _make_async_gen():

            async def wrapped(*args, **kwargs):
                issue_experimental_warning(function_name, feature_name, metadata)
                async for element in func(*args, **kwargs):
                    yield element

            return wrapped

        return functools.wraps(func)(_make_async_gen())

    if inspect.iscoroutinefunction(func):
        # Coroutine function: await and pass the result straight through.

        @functools.wraps(func)
        async def _awaiting_wrapper(*args, **kwargs):
            issue_experimental_warning(function_name, feature_name, metadata)
            return await func(*args, **kwargs)

        return _awaiting_wrapper

    if inspect.isgeneratorfunction(func):
        # Sync generator: delegate iteration after warning once.

        @functools.wraps(func)
        def _yielding_wrapper(*args, **kwargs):
            issue_experimental_warning(function_name, feature_name, metadata)
            yield from func(*args, **kwargs)

        return _yielding_wrapper

    # Plain synchronous callable.
    @functools.wraps(func)
    def _plain_wrapper(*args, **kwargs):
        issue_experimental_warning(function_name, feature_name, metadata)
        return func(*args, **kwargs)

    return _plain_wrapper
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import asyncio
|
|
17
|
+
import logging
|
|
18
|
+
import re
|
|
19
|
+
|
|
20
|
+
from aiq.builder.builder import Builder
|
|
21
|
+
from aiq.builder.framework_enum import LLMFrameworkEnum
|
|
22
|
+
from aiq.cli.register_workflow import register_its_strategy
|
|
23
|
+
from aiq.data_models.its_strategy import ITSStrategyBaseConfig
|
|
24
|
+
from aiq.experimental.inference_time_scaling.models.editor_config import IterativePlanRefinementConfig
|
|
25
|
+
from aiq.experimental.inference_time_scaling.models.its_item import ITSItem
|
|
26
|
+
from aiq.experimental.inference_time_scaling.models.stage_enums import PipelineTypeEnum
|
|
27
|
+
from aiq.experimental.inference_time_scaling.models.stage_enums import StageTypeEnum
|
|
28
|
+
from aiq.experimental.inference_time_scaling.models.strategy_base import StrategyBase
|
|
29
|
+
from aiq.utils.io.model_processing import remove_r1_think_tags
|
|
30
|
+
|
|
31
|
+
logger = logging.getLogger(__name__)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class IterativePlanRefinementEditor(StrategyBase):
    """
    A planner that generates an initial plan, then refines it multiple times
    using the same LLM. Each iteration updates the plan to (hopefully) be better.
    """

    def __init__(self, config: ITSStrategyBaseConfig) -> None:
        super().__init__(config)
        # LangChain chat model bound in build_components(); None until then.
        self.llm_bound = None

    def supported_pipeline_types(self) -> list[PipelineTypeEnum]:
        """Return the pipeline types this strategy supports.

        Annotation fix: the original ``-> [PipelineTypeEnum]`` was a list
        *literal*, not a valid type annotation.
        """
        return [PipelineTypeEnum.PLANNING]

    def stage_type(self) -> StageTypeEnum:
        """Return the ITS pipeline stage this strategy implements."""
        return StageTypeEnum.EDITING

    async def build_components(self, builder: Builder) -> None:
        """
        Build the components required for the iterative planner.
        """
        logger.debug("Building components for IterativePlanRefinementEditor")
        self.llm_bound = await builder.get_llm(self.config.editor_llm, wrapper_type=LLMFrameworkEnum.LANGCHAIN)

    async def refine_single(self, prompt: str, context: str, its_item: ITSItem, prompt_idx: int) -> ITSItem:
        """Refine one plan in place over ``config.num_iterations`` LLM passes.

        Args:
            prompt: The original user prompt that produced the plan.
            context: The agent context supplied to the refinement template.
            its_item: The planning item whose ``plan`` is refined.
            prompt_idx: 1-based index used only for log messages.

        Returns:
            ITSItem: The same item with its ``plan`` replaced by the final
            refined plan.

        Raises:
            ValueError: If the bound LLM is not a LangChain ``BaseChatModel``.
        """
        from langchain_core.language_models import BaseChatModel
        from langchain_core.prompts import PromptTemplate

        if not isinstance(self.llm_bound, BaseChatModel):
            raise ValueError("editor_llm must be a BaseChatModel instance for iterative plan refinement.")

        llm: BaseChatModel = self.llm_bound

        # Refinement loop
        refinement_template = PromptTemplate(
            template=self.config.refinement_template,
            input_variables=["current_plan", "context", "original_prompt"],
            validate_template=True,
        )

        current_plan = its_item.plan
        for iteration in range(1, self.config.num_iterations + 1):
            logger.info("Refinement iteration %d / %d for prompt %d", iteration, self.config.num_iterations, prompt_idx)
            refine_prompt = (await refinement_template.ainvoke({
                "current_plan": current_plan, "context": context, "original_prompt": prompt
            })).to_string()

            refine_response = await llm.ainvoke(refine_prompt)
            refined_plan = remove_r1_think_tags(
                refine_response.content if hasattr(refine_response, 'content') else str(refine_response))
            # Strip an optional leading "EDITED PLAN:" label the LLM may emit.
            refined_plan = re.sub(r'(?i)^\s*EDITED PLAN:\s*', '', refined_plan).strip()
            if refined_plan:
                current_plan = refined_plan
            else:
                # Keep the previous plan rather than regressing to empty text.
                logger.warning("Refinement iteration %d for prompt %d produced an empty plan; keeping existing plan.",
                               iteration,
                               prompt_idx)

        logger.info("IterativePlanRefinementPlanner produced a final plan after %d iterations.",
                    self.config.num_iterations)

        its_item.plan = current_plan
        # Return a single final plan
        return its_item

    async def ainvoke(self,
                      items: list[ITSItem],
                      original_prompt: str | None = None,
                      agent_context: str | None = None,
                      **kwargs) -> list[ITSItem]:
        """
        Runs the iterative plan refinement process on the provided planning items.

        Each planning item is refined in parallel the configured number of times. Default is 3.

        Args:
            items (list[ITSItem]): The planning items to refine.
            original_prompt (str): The original prompt used to generate the plans.
            agent_context (str): The context for the agent.

        Returns:
            list[ITSItem]: The refined planning items.

        Raises:
            ValueError: If ``original_prompt`` or ``agent_context`` is missing.
        """

        if not original_prompt or not agent_context:
            # Typo fix: message previously read "provdied".
            raise ValueError("Arguments original_prompt and agent_context must be provided.")

        # Generate feedback for each planning item concurrently
        tasks = [
            self.refine_single(prompt=original_prompt, context=agent_context, its_item=item, prompt_idx=i + 1)
            for i, item in enumerate(items)
        ]

        # Run the tasks concurrently and gather results
        refined_planning_items = await asyncio.gather(*tasks)

        return refined_planning_items
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
@register_its_strategy(config_type=IterativePlanRefinementConfig)
async def register_iterative_plan_refinement_editor(config: IterativePlanRefinementConfig, builder: Builder):
    """
    Register the IterativePlanRefinementEditor strategy.

    Args:
        config (IterativePlanRefinementConfig): The configuration for the strategy.

    Returns:
        IterativePlanRefinementEditor: The registered strategy instance.
    """
    # Construct the strategy, bind its LLM, then hand it to the registry.
    strategy = IterativePlanRefinementEditor(config)
    await strategy.build_components(builder=builder)

    yield strategy