aiqtoolkit 1.1.0a20250503__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aiqtoolkit might be problematic.
- aiq/agent/__init__.py +0 -0
- aiq/agent/base.py +76 -0
- aiq/agent/dual_node.py +67 -0
- aiq/agent/react_agent/__init__.py +0 -0
- aiq/agent/react_agent/agent.py +322 -0
- aiq/agent/react_agent/output_parser.py +104 -0
- aiq/agent/react_agent/prompt.py +46 -0
- aiq/agent/react_agent/register.py +148 -0
- aiq/agent/reasoning_agent/__init__.py +0 -0
- aiq/agent/reasoning_agent/reasoning_agent.py +224 -0
- aiq/agent/register.py +23 -0
- aiq/agent/rewoo_agent/__init__.py +0 -0
- aiq/agent/rewoo_agent/agent.py +410 -0
- aiq/agent/rewoo_agent/prompt.py +108 -0
- aiq/agent/rewoo_agent/register.py +158 -0
- aiq/agent/tool_calling_agent/__init__.py +0 -0
- aiq/agent/tool_calling_agent/agent.py +123 -0
- aiq/agent/tool_calling_agent/register.py +105 -0
- aiq/builder/__init__.py +0 -0
- aiq/builder/builder.py +223 -0
- aiq/builder/component_utils.py +303 -0
- aiq/builder/context.py +212 -0
- aiq/builder/embedder.py +24 -0
- aiq/builder/eval_builder.py +116 -0
- aiq/builder/evaluator.py +29 -0
- aiq/builder/framework_enum.py +24 -0
- aiq/builder/front_end.py +73 -0
- aiq/builder/function.py +297 -0
- aiq/builder/function_base.py +376 -0
- aiq/builder/function_info.py +627 -0
- aiq/builder/intermediate_step_manager.py +127 -0
- aiq/builder/llm.py +25 -0
- aiq/builder/retriever.py +25 -0
- aiq/builder/user_interaction_manager.py +71 -0
- aiq/builder/workflow.py +143 -0
- aiq/builder/workflow_builder.py +749 -0
- aiq/cli/__init__.py +14 -0
- aiq/cli/cli_utils/__init__.py +0 -0
- aiq/cli/cli_utils/config_override.py +233 -0
- aiq/cli/cli_utils/validation.py +37 -0
- aiq/cli/commands/__init__.py +0 -0
- aiq/cli/commands/configure/__init__.py +0 -0
- aiq/cli/commands/configure/channel/__init__.py +0 -0
- aiq/cli/commands/configure/channel/add.py +28 -0
- aiq/cli/commands/configure/channel/channel.py +36 -0
- aiq/cli/commands/configure/channel/remove.py +30 -0
- aiq/cli/commands/configure/channel/update.py +30 -0
- aiq/cli/commands/configure/configure.py +33 -0
- aiq/cli/commands/evaluate.py +139 -0
- aiq/cli/commands/info/__init__.py +14 -0
- aiq/cli/commands/info/info.py +37 -0
- aiq/cli/commands/info/list_channels.py +32 -0
- aiq/cli/commands/info/list_components.py +129 -0
- aiq/cli/commands/registry/__init__.py +14 -0
- aiq/cli/commands/registry/publish.py +88 -0
- aiq/cli/commands/registry/pull.py +118 -0
- aiq/cli/commands/registry/registry.py +38 -0
- aiq/cli/commands/registry/remove.py +108 -0
- aiq/cli/commands/registry/search.py +155 -0
- aiq/cli/commands/start.py +250 -0
- aiq/cli/commands/uninstall.py +83 -0
- aiq/cli/commands/validate.py +47 -0
- aiq/cli/commands/workflow/__init__.py +14 -0
- aiq/cli/commands/workflow/templates/__init__.py.j2 +0 -0
- aiq/cli/commands/workflow/templates/config.yml.j2 +16 -0
- aiq/cli/commands/workflow/templates/pyproject.toml.j2 +22 -0
- aiq/cli/commands/workflow/templates/register.py.j2 +5 -0
- aiq/cli/commands/workflow/templates/workflow.py.j2 +36 -0
- aiq/cli/commands/workflow/workflow.py +37 -0
- aiq/cli/commands/workflow/workflow_commands.py +313 -0
- aiq/cli/entrypoint.py +133 -0
- aiq/cli/main.py +44 -0
- aiq/cli/register_workflow.py +408 -0
- aiq/cli/type_registry.py +879 -0
- aiq/data_models/__init__.py +14 -0
- aiq/data_models/api_server.py +588 -0
- aiq/data_models/common.py +143 -0
- aiq/data_models/component.py +46 -0
- aiq/data_models/component_ref.py +135 -0
- aiq/data_models/config.py +349 -0
- aiq/data_models/dataset_handler.py +122 -0
- aiq/data_models/discovery_metadata.py +269 -0
- aiq/data_models/embedder.py +26 -0
- aiq/data_models/evaluate.py +104 -0
- aiq/data_models/evaluator.py +26 -0
- aiq/data_models/front_end.py +26 -0
- aiq/data_models/function.py +30 -0
- aiq/data_models/function_dependencies.py +64 -0
- aiq/data_models/interactive.py +237 -0
- aiq/data_models/intermediate_step.py +269 -0
- aiq/data_models/invocation_node.py +38 -0
- aiq/data_models/llm.py +26 -0
- aiq/data_models/logging.py +26 -0
- aiq/data_models/memory.py +26 -0
- aiq/data_models/profiler.py +53 -0
- aiq/data_models/registry_handler.py +26 -0
- aiq/data_models/retriever.py +30 -0
- aiq/data_models/step_adaptor.py +64 -0
- aiq/data_models/streaming.py +33 -0
- aiq/data_models/swe_bench_model.py +54 -0
- aiq/data_models/telemetry_exporter.py +26 -0
- aiq/embedder/__init__.py +0 -0
- aiq/embedder/langchain_client.py +41 -0
- aiq/embedder/nim_embedder.py +58 -0
- aiq/embedder/openai_embedder.py +42 -0
- aiq/embedder/register.py +24 -0
- aiq/eval/__init__.py +14 -0
- aiq/eval/config.py +42 -0
- aiq/eval/dataset_handler/__init__.py +0 -0
- aiq/eval/dataset_handler/dataset_downloader.py +106 -0
- aiq/eval/dataset_handler/dataset_filter.py +52 -0
- aiq/eval/dataset_handler/dataset_handler.py +169 -0
- aiq/eval/evaluate.py +323 -0
- aiq/eval/evaluator/__init__.py +14 -0
- aiq/eval/evaluator/evaluator_model.py +44 -0
- aiq/eval/intermediate_step_adapter.py +93 -0
- aiq/eval/rag_evaluator/__init__.py +0 -0
- aiq/eval/rag_evaluator/evaluate.py +138 -0
- aiq/eval/rag_evaluator/register.py +138 -0
- aiq/eval/register.py +23 -0
- aiq/eval/remote_workflow.py +128 -0
- aiq/eval/runtime_event_subscriber.py +52 -0
- aiq/eval/swe_bench_evaluator/__init__.py +0 -0
- aiq/eval/swe_bench_evaluator/evaluate.py +215 -0
- aiq/eval/swe_bench_evaluator/register.py +36 -0
- aiq/eval/trajectory_evaluator/__init__.py +0 -0
- aiq/eval/trajectory_evaluator/evaluate.py +118 -0
- aiq/eval/trajectory_evaluator/register.py +40 -0
- aiq/eval/tunable_rag_evaluator/__init__.py +0 -0
- aiq/eval/tunable_rag_evaluator/evaluate.py +263 -0
- aiq/eval/tunable_rag_evaluator/register.py +50 -0
- aiq/eval/utils/__init__.py +0 -0
- aiq/eval/utils/output_uploader.py +131 -0
- aiq/eval/utils/tqdm_position_registry.py +40 -0
- aiq/front_ends/__init__.py +14 -0
- aiq/front_ends/console/__init__.py +14 -0
- aiq/front_ends/console/console_front_end_config.py +32 -0
- aiq/front_ends/console/console_front_end_plugin.py +107 -0
- aiq/front_ends/console/register.py +25 -0
- aiq/front_ends/cron/__init__.py +14 -0
- aiq/front_ends/fastapi/__init__.py +14 -0
- aiq/front_ends/fastapi/fastapi_front_end_config.py +150 -0
- aiq/front_ends/fastapi/fastapi_front_end_plugin.py +103 -0
- aiq/front_ends/fastapi/fastapi_front_end_plugin_worker.py +607 -0
- aiq/front_ends/fastapi/intermediate_steps_subscriber.py +80 -0
- aiq/front_ends/fastapi/job_store.py +161 -0
- aiq/front_ends/fastapi/main.py +70 -0
- aiq/front_ends/fastapi/message_handler.py +279 -0
- aiq/front_ends/fastapi/message_validator.py +345 -0
- aiq/front_ends/fastapi/register.py +25 -0
- aiq/front_ends/fastapi/response_helpers.py +195 -0
- aiq/front_ends/fastapi/step_adaptor.py +315 -0
- aiq/front_ends/fastapi/websocket.py +148 -0
- aiq/front_ends/mcp/__init__.py +14 -0
- aiq/front_ends/mcp/mcp_front_end_config.py +32 -0
- aiq/front_ends/mcp/mcp_front_end_plugin.py +93 -0
- aiq/front_ends/mcp/register.py +27 -0
- aiq/front_ends/mcp/tool_converter.py +242 -0
- aiq/front_ends/register.py +22 -0
- aiq/front_ends/simple_base/__init__.py +14 -0
- aiq/front_ends/simple_base/simple_front_end_plugin_base.py +52 -0
- aiq/llm/__init__.py +0 -0
- aiq/llm/nim_llm.py +45 -0
- aiq/llm/openai_llm.py +45 -0
- aiq/llm/register.py +22 -0
- aiq/llm/utils/__init__.py +14 -0
- aiq/llm/utils/env_config_value.py +94 -0
- aiq/llm/utils/error.py +17 -0
- aiq/memory/__init__.py +20 -0
- aiq/memory/interfaces.py +183 -0
- aiq/memory/models.py +102 -0
- aiq/meta/module_to_distro.json +3 -0
- aiq/meta/pypi.md +59 -0
- aiq/observability/__init__.py +0 -0
- aiq/observability/async_otel_listener.py +433 -0
- aiq/observability/register.py +99 -0
- aiq/plugins/.namespace +1 -0
- aiq/profiler/__init__.py +0 -0
- aiq/profiler/callbacks/__init__.py +0 -0
- aiq/profiler/callbacks/agno_callback_handler.py +295 -0
- aiq/profiler/callbacks/base_callback_class.py +20 -0
- aiq/profiler/callbacks/langchain_callback_handler.py +278 -0
- aiq/profiler/callbacks/llama_index_callback_handler.py +205 -0
- aiq/profiler/callbacks/semantic_kernel_callback_handler.py +238 -0
- aiq/profiler/callbacks/token_usage_base_model.py +27 -0
- aiq/profiler/data_frame_row.py +51 -0
- aiq/profiler/decorators/__init__.py +0 -0
- aiq/profiler/decorators/framework_wrapper.py +131 -0
- aiq/profiler/decorators/function_tracking.py +254 -0
- aiq/profiler/forecasting/__init__.py +0 -0
- aiq/profiler/forecasting/config.py +18 -0
- aiq/profiler/forecasting/model_trainer.py +75 -0
- aiq/profiler/forecasting/models/__init__.py +22 -0
- aiq/profiler/forecasting/models/forecasting_base_model.py +40 -0
- aiq/profiler/forecasting/models/linear_model.py +196 -0
- aiq/profiler/forecasting/models/random_forest_regressor.py +268 -0
- aiq/profiler/inference_metrics_model.py +25 -0
- aiq/profiler/inference_optimization/__init__.py +0 -0
- aiq/profiler/inference_optimization/bottleneck_analysis/__init__.py +0 -0
- aiq/profiler/inference_optimization/bottleneck_analysis/nested_stack_analysis.py +452 -0
- aiq/profiler/inference_optimization/bottleneck_analysis/simple_stack_analysis.py +258 -0
- aiq/profiler/inference_optimization/data_models.py +386 -0
- aiq/profiler/inference_optimization/experimental/__init__.py +0 -0
- aiq/profiler/inference_optimization/experimental/concurrency_spike_analysis.py +468 -0
- aiq/profiler/inference_optimization/experimental/prefix_span_analysis.py +405 -0
- aiq/profiler/inference_optimization/llm_metrics.py +212 -0
- aiq/profiler/inference_optimization/prompt_caching.py +163 -0
- aiq/profiler/inference_optimization/token_uniqueness.py +107 -0
- aiq/profiler/inference_optimization/workflow_runtimes.py +72 -0
- aiq/profiler/intermediate_property_adapter.py +102 -0
- aiq/profiler/profile_runner.py +433 -0
- aiq/profiler/utils.py +184 -0
- aiq/registry_handlers/__init__.py +0 -0
- aiq/registry_handlers/local/__init__.py +0 -0
- aiq/registry_handlers/local/local_handler.py +176 -0
- aiq/registry_handlers/local/register_local.py +37 -0
- aiq/registry_handlers/metadata_factory.py +60 -0
- aiq/registry_handlers/package_utils.py +198 -0
- aiq/registry_handlers/pypi/__init__.py +0 -0
- aiq/registry_handlers/pypi/pypi_handler.py +251 -0
- aiq/registry_handlers/pypi/register_pypi.py +40 -0
- aiq/registry_handlers/register.py +21 -0
- aiq/registry_handlers/registry_handler_base.py +157 -0
- aiq/registry_handlers/rest/__init__.py +0 -0
- aiq/registry_handlers/rest/register_rest.py +56 -0
- aiq/registry_handlers/rest/rest_handler.py +237 -0
- aiq/registry_handlers/schemas/__init__.py +0 -0
- aiq/registry_handlers/schemas/headers.py +42 -0
- aiq/registry_handlers/schemas/package.py +68 -0
- aiq/registry_handlers/schemas/publish.py +63 -0
- aiq/registry_handlers/schemas/pull.py +82 -0
- aiq/registry_handlers/schemas/remove.py +36 -0
- aiq/registry_handlers/schemas/search.py +91 -0
- aiq/registry_handlers/schemas/status.py +47 -0
- aiq/retriever/__init__.py +0 -0
- aiq/retriever/interface.py +37 -0
- aiq/retriever/milvus/__init__.py +14 -0
- aiq/retriever/milvus/register.py +81 -0
- aiq/retriever/milvus/retriever.py +228 -0
- aiq/retriever/models.py +74 -0
- aiq/retriever/nemo_retriever/__init__.py +14 -0
- aiq/retriever/nemo_retriever/register.py +60 -0
- aiq/retriever/nemo_retriever/retriever.py +190 -0
- aiq/retriever/register.py +22 -0
- aiq/runtime/__init__.py +14 -0
- aiq/runtime/loader.py +188 -0
- aiq/runtime/runner.py +176 -0
- aiq/runtime/session.py +136 -0
- aiq/runtime/user_metadata.py +131 -0
- aiq/settings/__init__.py +0 -0
- aiq/settings/global_settings.py +318 -0
- aiq/test/.namespace +1 -0
- aiq/tool/__init__.py +0 -0
- aiq/tool/code_execution/__init__.py +0 -0
- aiq/tool/code_execution/code_sandbox.py +188 -0
- aiq/tool/code_execution/local_sandbox/Dockerfile.sandbox +60 -0
- aiq/tool/code_execution/local_sandbox/__init__.py +13 -0
- aiq/tool/code_execution/local_sandbox/local_sandbox_server.py +79 -0
- aiq/tool/code_execution/local_sandbox/sandbox.requirements.txt +4 -0
- aiq/tool/code_execution/local_sandbox/start_local_sandbox.sh +25 -0
- aiq/tool/code_execution/register.py +70 -0
- aiq/tool/code_execution/utils.py +100 -0
- aiq/tool/datetime_tools.py +42 -0
- aiq/tool/document_search.py +141 -0
- aiq/tool/github_tools/__init__.py +0 -0
- aiq/tool/github_tools/create_github_commit.py +133 -0
- aiq/tool/github_tools/create_github_issue.py +87 -0
- aiq/tool/github_tools/create_github_pr.py +106 -0
- aiq/tool/github_tools/get_github_file.py +106 -0
- aiq/tool/github_tools/get_github_issue.py +166 -0
- aiq/tool/github_tools/get_github_pr.py +256 -0
- aiq/tool/github_tools/update_github_issue.py +100 -0
- aiq/tool/mcp/__init__.py +14 -0
- aiq/tool/mcp/mcp_client.py +220 -0
- aiq/tool/mcp/mcp_tool.py +76 -0
- aiq/tool/memory_tools/__init__.py +0 -0
- aiq/tool/memory_tools/add_memory_tool.py +67 -0
- aiq/tool/memory_tools/delete_memory_tool.py +67 -0
- aiq/tool/memory_tools/get_memory_tool.py +72 -0
- aiq/tool/nvidia_rag.py +95 -0
- aiq/tool/register.py +36 -0
- aiq/tool/retriever.py +89 -0
- aiq/utils/__init__.py +0 -0
- aiq/utils/data_models/__init__.py +0 -0
- aiq/utils/data_models/schema_validator.py +58 -0
- aiq/utils/debugging_utils.py +43 -0
- aiq/utils/exception_handlers/__init__.py +0 -0
- aiq/utils/exception_handlers/schemas.py +114 -0
- aiq/utils/io/__init__.py +0 -0
- aiq/utils/io/yaml_tools.py +119 -0
- aiq/utils/metadata_utils.py +74 -0
- aiq/utils/optional_imports.py +142 -0
- aiq/utils/producer_consumer_queue.py +178 -0
- aiq/utils/reactive/__init__.py +0 -0
- aiq/utils/reactive/base/__init__.py +0 -0
- aiq/utils/reactive/base/observable_base.py +65 -0
- aiq/utils/reactive/base/observer_base.py +55 -0
- aiq/utils/reactive/base/subject_base.py +79 -0
- aiq/utils/reactive/observable.py +59 -0
- aiq/utils/reactive/observer.py +76 -0
- aiq/utils/reactive/subject.py +131 -0
- aiq/utils/reactive/subscription.py +49 -0
- aiq/utils/settings/__init__.py +0 -0
- aiq/utils/settings/global_settings.py +197 -0
- aiq/utils/type_converter.py +232 -0
- aiq/utils/type_utils.py +397 -0
- aiq/utils/url_utils.py +27 -0
- aiqtoolkit-1.1.0a20250503.dist-info/METADATA +330 -0
- aiqtoolkit-1.1.0a20250503.dist-info/RECORD +314 -0
- aiqtoolkit-1.1.0a20250503.dist-info/WHEEL +5 -0
- aiqtoolkit-1.1.0a20250503.dist-info/entry_points.txt +17 -0
- aiqtoolkit-1.1.0a20250503.dist-info/licenses/LICENSE-3rd-party.txt +3686 -0
- aiqtoolkit-1.1.0a20250503.dist-info/licenses/LICENSE.md +201 -0
- aiqtoolkit-1.1.0a20250503.dist-info/top_level.txt +1 -0
+++ aiq/profiler/callbacks/agno_callback_handler.py
@@ -0,0 +1,295 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import logging
+import threading
+import time
+from collections.abc import Callable
+from typing import Any
+from uuid import uuid4
+
+import litellm
+
+from aiq.builder.context import AIQContext
+from aiq.builder.framework_enum import LLMFrameworkEnum
+from aiq.data_models.intermediate_step import IntermediateStepPayload
+from aiq.data_models.intermediate_step import IntermediateStepType
+from aiq.data_models.intermediate_step import StreamEventData
+from aiq.data_models.intermediate_step import TraceMetadata
+from aiq.data_models.intermediate_step import UsageInfo
+from aiq.profiler.callbacks.base_callback_class import BaseProfilerCallback
+from aiq.profiler.callbacks.token_usage_base_model import TokenUsageBaseModel
+
+logger = logging.getLogger(__name__)
+
+
+class AgnoProfilerHandler(BaseProfilerCallback):
+    """
+    A callback manager/handler for Agno that intercepts calls to:
+
+    - Tool execution
+    - LLM Calls
+
+    to collect usage statistics (tokens, inputs, outputs, time intervals, etc.)
+    and store them in AIQ Toolkit's usage_stats queue for subsequent analysis.
+    """
+
+    def __init__(self) -> None:
+        super().__init__()
+        self._lock = threading.Lock()
+        self.last_call_ts = time.time()
+        self.step_manager = AIQContext.get().intermediate_step_manager
+
+        # Original references to Agno methods (for uninstrumenting if needed)
+        self._original_tool_execute = None
+        self._original_llm_call = None
+
+    def instrument(self) -> None:
+        """
+        Monkey-patch the relevant Agno methods with usage-stat collection logic.
+        """
+        # Save the originals and apply patches
+        self._original_llm_call = getattr(litellm, "completion", None)
+
+        # Patch LLM completion if available
+        if self._original_llm_call:
+            litellm.completion = self._llm_call_monkey_patch()
+            logger.debug("AgnoProfilerHandler LLM call instrumentation applied successfully.")
+        else:
+            logger.debug("Could not patch Agno LLM calls: litellm.completion not found")
+
+        # Note: Agno doesn't have a class-based tool structure to patch directly.
+        # Instead, it uses decorators to convert functions to tools.
+        # In AIQ Toolkit, tool executions are captured at the execute_agno_tool level
+        # in packages/aiqtoolkit_agno/src/aiq/plugins/agno/tool_wrapper.py
+
+        # To properly monitor Agno tool executions, we would need to either:
+        # 1. Patch the execute_agno_tool function in tool_wrapper.py
+        # 2. Add explicit instrumentation in that function to push events to the step manager
+        # 3. Or, if Agno updates to have a class-based tool structure, update this handler
+        #    to patch those classes
+
+        # Recommended future enhancement:
+        # The execute_agno_tool function in packages/aiqtoolkit_agno/src/aiq/plugins/agno/tool_wrapper.py
+        # should be updated to directly push IntermediateStepPayload events to the step manager
+        # at the beginning and end of tool execution, similar to what this handler does for LLM calls.
+
+        logger.debug("AgnoProfilerHandler instrumentation completed.")
+
+    def _tool_execute_monkey_patch(self) -> Callable[..., Any]:
+        """
+        Returns a function that wraps tool execution calls with usage-logging.
+
+        Note: This method is currently not used in the instrument() function since
+        Agno doesn't have a class-based tool structure to patch. It's kept for
+        reference or future use if Agno changes its architecture.
+        """
+        original_func = self._original_tool_execute
+
+        def wrapped_tool_execute(*args, **kwargs) -> Any:
+            """
+            Collects usage stats for tool execution, calls the original, and captures output stats.
+            """
+            now = time.time()
+            tool_name = kwargs.get("tool_name", "")
+            uuid = str(uuid4())
+
+            try:
+                # Pre-call usage event
+                stats = IntermediateStepPayload(event_type=IntermediateStepType.TOOL_START,
+                                                framework=LLMFrameworkEnum.AGNO,
+                                                name=tool_name,
+                                                UUID=uuid,
+                                                data=StreamEventData(),
+                                                metadata=TraceMetadata(tool_inputs={
+                                                    "args": args, "kwargs": dict(kwargs)
+                                                }),
+                                                usage_info=UsageInfo(token_usage=TokenUsageBaseModel()))
+
+                self.step_manager.push_intermediate_step(stats)
+                self.last_call_ts = now
+
+                # Call the original execute
+                result = original_func(*args, **kwargs)
+                now = time.time()
+
+                # Post-call usage stats
+                usage_stat = IntermediateStepPayload(
+                    event_type=IntermediateStepType.TOOL_END,
+                    span_event_timestamp=now,
+                    framework=LLMFrameworkEnum.AGNO,
+                    name=tool_name,
+                    UUID=uuid,
+                    data=StreamEventData(input={
+                        "args": args, "kwargs": dict(kwargs)
+                    }, output=str(result)),
+                    metadata=TraceMetadata(tool_outputs={"result": str(result)}),
+                    usage_info=UsageInfo(token_usage=TokenUsageBaseModel()),
+                )
+
+                self.step_manager.push_intermediate_step(usage_stat)
+                return result
+
+            except Exception as e:
+                logger.exception("Tool execution error: %s", e)
+                raise
+
+        return wrapped_tool_execute
+
+    def _llm_call_monkey_patch(self) -> Callable[..., Any]:
+        """
+        Returns a function that wraps calls to litellm.completion(...) with usage-logging.
+        """
+        original_func = self._original_llm_call
+
+        def wrapped_llm_call(*args, **kwargs) -> Any:
+            """
+            Collects usage stats for LLM calls, calls the original, and captures output stats.
+            """
+            now = time.time()
+            seconds_between_calls = int(now - self.last_call_ts)
+            model_name = kwargs.get('model', "")
+
+            model_input = ""
+            try:
+                for message in kwargs.get('messages', []):
+                    model_input += message.get('content', "")
+            except Exception as e:
+                logger.exception("Error getting model input: %s", e)
+
+            uuid = str(uuid4())
+
+            # Record the start event
+            input_stats = IntermediateStepPayload(
+                event_type=IntermediateStepType.LLM_START,
+                framework=LLMFrameworkEnum.AGNO,
+                name=model_name,
+                UUID=uuid,
+                data=StreamEventData(input=model_input),
+                metadata=TraceMetadata(chat_inputs=copy.deepcopy(kwargs.get('messages', []))),
+                usage_info=UsageInfo(token_usage=TokenUsageBaseModel(),
+                                     num_llm_calls=1,
+                                     seconds_between_calls=seconds_between_calls))
+
+            self.step_manager.push_intermediate_step(input_stats)
+
+            # Verify we have a valid original function before calling it
+            if original_func is None:
+                logger.error("Original litellm.completion function is None - cannot call it")
+                output = None
+            else:
+                # Call the original litellm.completion(...)
+                logger.debug(
+                    f"Calling litellm.completion for {model_name} with {len(args)} args and {len(kwargs)} kwargs")
+                try:
+                    output = original_func(*args, **kwargs)
+                    logger.debug(f"Original litellm.completion returned: {type(output)}")
+                except Exception as e:
+                    logger.exception(f"Error calling original litellm.completion: {e}")
+                    output = None
+
+            # Initialize default values
+            model_output = ""
+            chat_responses = None
+            token_usage = TokenUsageBaseModel()
+
+            # Log what we received to help with debugging
+            logger.debug(f"LLM call to {model_name} received output type: {type(output)}")
+
+            # Safely process the output if it's not None
+            if output is not None:
+                try:
+                    # Extract model output text from choices
+                    if hasattr(output, 'choices') and output.choices:
+                        logger.debug(f"Output has {len(output.choices)} choices")
+                        for i, choice in enumerate(output.choices):
+                            logger.debug(f"Processing choice {i} of type {type(choice)}")
+                            if hasattr(choice, 'model_extra') and 'message' in choice.model_extra:
+                                msg = choice.model_extra["message"]
+                                content = msg.get('content', "")
+                                logger.debug(f"Got content from model_extra.message: {content[:50]}...")
+                                model_output += content
+                            elif hasattr(choice, 'message') and hasattr(choice.message, 'content'):
+                                content = choice.message.content or ""
+                                logger.debug(f"Got content from message.content: {content[:50]}...")
+                                model_output += content
+                            else:
+                                logger.debug(f"Could not extract content from choice: {choice}")
+
+                    # Try to get chat responses
+                    if hasattr(output, 'choices') and len(output.choices) > 0:
+                        choice = output.choices[0]
+                        if hasattr(choice, 'model_dump'):
+                            logger.debug("Using model_dump to extract chat responses")
+                            chat_responses = choice.model_dump()
+                        else:
+                            # Fall back to a simpler representation
+                            logger.debug("Falling back to simple representation for chat responses")
+                            chat_responses = {"content": model_output}
+
+                    # Try to get token usage
+                    if hasattr(output, 'model_extra') and 'usage' in output.model_extra:
+                        usage_data = output.model_extra['usage']
+                        logger.debug(f"Found usage data of type {type(usage_data)}")
+
+                        # Special debug for the test case
+                        if hasattr(usage_data, 'prompt_tokens'
+                                   ) and usage_data.prompt_tokens == 20 and usage_data.completion_tokens == 15:
+                            logger.debug("Found test case token usage object with 20/15/35 tokens")
+
+                        if hasattr(usage_data, 'model_dump'):
+                            logger.debug("Using model_dump to extract token usage")
+                            token_usage = TokenUsageBaseModel(**usage_data.model_dump())
+                        elif isinstance(usage_data, dict):
+                            logger.debug("Extracting token usage from dictionary")
+                            token_usage = TokenUsageBaseModel(prompt_tokens=usage_data.get('prompt_tokens', 0),
+                                                              completion_tokens=usage_data.get('completion_tokens', 0),
+                                                              total_tokens=usage_data.get('total_tokens', 0))
+                        elif isinstance(usage_data, TokenUsageBaseModel):
+                            # If it's already a TokenUsageBaseModel instance, use it directly
+                            logger.debug("Using TokenUsageBaseModel directly")
+                            token_usage = usage_data
+                        elif hasattr(usage_data, 'prompt_tokens') and hasattr(
+                                usage_data, 'completion_tokens') and hasattr(usage_data, 'total_tokens'):
+                            # For objects that have the needed properties but aren't TokenUsageBaseModel
+                            logger.debug("Using object with token properties")
+                            token_usage = TokenUsageBaseModel(prompt_tokens=usage_data.prompt_tokens,
+                                                              completion_tokens=usage_data.completion_tokens,
+                                                              total_tokens=usage_data.total_tokens)
+
+                    logger.debug(f"Final token usage: prompt={token_usage.prompt_tokens}, "
+                                 f"completion={token_usage.completion_tokens}, "
+                                 f"total={token_usage.total_tokens}")
+                except Exception as e:
+                    logger.exception("Error getting model output: %s", e)
+
+            now = time.time()
+            # Record the end event
+            output_stats = IntermediateStepPayload(event_type=IntermediateStepType.LLM_END,
+                                                   span_event_timestamp=now,
+                                                   framework=LLMFrameworkEnum.AGNO,
+                                                   name=model_name,
+                                                   UUID=uuid,
+                                                   data=StreamEventData(input=model_input, output=model_output),
+                                                   metadata=TraceMetadata(chat_responses=chat_responses),
+                                                   usage_info=UsageInfo(token_usage=token_usage,
+                                                                        num_llm_calls=1,
+                                                                        seconds_between_calls=seconds_between_calls))
+
+            self.step_manager.push_intermediate_step(output_stats)
+            return output
+
+        return wrapped_llm_call
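
A minimal usage sketch of the handler above, for orientation. It assumes an active AIQ context (so that AIQContext.get().intermediate_step_manager resolves); the actual runtime wiring is handled elsewhere in the toolkit and is not part of this diff.

    # Hypothetical sketch, not part of the package: assumes an active AIQ context.
    from aiq.profiler.callbacks.agno_callback_handler import AgnoProfilerHandler

    handler = AgnoProfilerHandler()
    handler.instrument()  # swaps litellm.completion for wrapped_llm_call

    # Every subsequent litellm.completion(...) call made by Agno now emits a
    # paired LLM_START / LLM_END IntermediateStepPayload to the step manager.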
+++ aiq/profiler/callbacks/base_callback_class.py
@@ -0,0 +1,20 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC
+
+
+class BaseProfilerCallback(ABC):
+    pass
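
BaseProfilerCallback is a bare marker ABC with no abstract methods; each framework handler in this package inherits from it and supplies its own patching logic. A hypothetical skeleton of such a subclass (the class name and method body are illustrative only, not part of the package):

    from aiq.profiler.callbacks.base_callback_class import BaseProfilerCallback

    class MyFrameworkProfilerHandler(BaseProfilerCallback):  # hypothetical name
        def instrument(self) -> None:
            # Patch the target framework's LLM/tool entry points and push
            # IntermediateStepPayload events, as AgnoProfilerHandler does above.
            ...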
+++ aiq/profiler/callbacks/langchain_callback_handler.py
@@ -0,0 +1,278 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import copy
+import logging
+import threading
+import time
+from typing import Any
+from uuid import UUID
+from uuid import uuid4
+
+from langchain_core.callbacks import AsyncCallbackHandler
+from langchain_core.messages import AIMessage
+from langchain_core.messages import BaseMessage
+from langchain_core.outputs import ChatGeneration
+from langchain_core.outputs import LLMResult
+
+from aiq.builder.context import AIQContext
+from aiq.builder.framework_enum import LLMFrameworkEnum
+from aiq.data_models.intermediate_step import IntermediateStepPayload
+from aiq.data_models.intermediate_step import IntermediateStepType
+from aiq.data_models.intermediate_step import StreamEventData
+from aiq.data_models.intermediate_step import TraceMetadata
+from aiq.data_models.intermediate_step import UsageInfo
+from aiq.profiler.callbacks.base_callback_class import BaseProfilerCallback
+from aiq.profiler.callbacks.token_usage_base_model import TokenUsageBaseModel
+
+logger = logging.getLogger(__name__)
+
+
+class LangchainProfilerHandler(AsyncCallbackHandler, BaseProfilerCallback):  # pylint: disable=R0901
+    """Callback Handler that tracks NIM info."""
+
+    total_tokens: int = 0
+    prompt_tokens: int = 0
+    completion_tokens: int = 0
+    successful_requests: int = 0
+    raise_error = True  # Override to raise error and run inline
+    run_inline = True
+
+    def __init__(self) -> None:
+        super().__init__()
+        self._lock = threading.Lock()
+        self.last_call_ts = time.time()
+
+        self.step_manager = AIQContext.get().intermediate_step_manager
+        self._state = IntermediateStepType.LLM_END
+
+        self._run_id_to_model_name = {}
+        self._run_id_to_llm_input = {}
+        self._run_id_to_tool_input = {}
+        self._run_id_to_start_time = {}
+
+    def __repr__(self) -> str:
+        return (f"Tokens Used: {self.total_tokens}\n"
+                f"\tPrompt Tokens: {self.prompt_tokens}\n"
+                f"\tCompletion Tokens: {self.completion_tokens}\n"
+                f"Successful Requests: {self.successful_requests}\n")
+
+    @property
+    def always_verbose(self) -> bool:
+        """Whether to call verbose callbacks even if verbose is False."""
+        return True
+
+    def _extract_token_base_model(self, usage_metadata: dict[str, Any]) -> TokenUsageBaseModel:
+        if usage_metadata:
+            prompt_tokens = usage_metadata.get("input_tokens", 0)
+            completion_tokens = usage_metadata.get("output_tokens", 0)
+            total_tokens = usage_metadata.get("total_tokens", 0)
+
+            return TokenUsageBaseModel(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens,
+            )
+        return TokenUsageBaseModel()
+
+    async def on_llm_start(self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any) -> None:
+
+        model_name = ""
+        try:
+            model_name = kwargs.get("metadata")["ls_model_name"]
+        except Exception as e:
+            logger.exception("Error getting model name: %s", e, exc_info=True)
+
+        run_id = str(kwargs.get("run_id", str(uuid4())))
+        self._run_id_to_model_name[run_id] = model_name
+
+        stats = IntermediateStepPayload(event_type=IntermediateStepType.LLM_START,
+                                        framework=LLMFrameworkEnum.LANGCHAIN,
+                                        name=model_name,
+                                        UUID=run_id,
+                                        data=StreamEventData(input=prompts[-1]),
+                                        metadata=TraceMetadata(chat_inputs=copy.deepcopy(prompts)),
+                                        usage_info=UsageInfo(token_usage=TokenUsageBaseModel(),
+                                                             num_llm_calls=1,
+                                                             seconds_between_calls=int(time.time() -
+                                                                                       self.last_call_ts)))
+
+        self.step_manager.push_intermediate_step(stats)
+        self._run_id_to_llm_input[run_id] = prompts[-1]
+        self._state = IntermediateStepType.LLM_START
+        self.last_call_ts = time.time()
+        self._run_id_to_start_time[run_id] = time.time()
+
+    async def on_chat_model_start(
+        self,
+        serialized: dict[str, Any],
+        messages: list[list[BaseMessage]],
+        *,
+        run_id: UUID,
+        parent_run_id: UUID | None = None,
+        tags: list[str] | None = None,
+        metadata: dict[str, Any] | None = None,
+        **kwargs: Any,
+    ) -> Any:
+
+        model_name = ""
+        try:
+            model_name = metadata["ls_model_name"] if metadata else kwargs.get("metadata")["ls_model_name"]
+        except Exception as e:
+            logger.exception("Error getting model name: %s", e, exc_info=True)
+
+        run_id = str(run_id)
+        self._run_id_to_model_name[run_id] = model_name
+
+        stats = IntermediateStepPayload(event_type=IntermediateStepType.LLM_START,
+                                        framework=LLMFrameworkEnum.LANGCHAIN,
+                                        name=model_name,
+                                        UUID=run_id,
+                                        data=StreamEventData(input=copy.deepcopy(messages[0])),
+                                        metadata=TraceMetadata(chat_inputs=copy.deepcopy(messages[0])),
+                                        usage_info=UsageInfo(token_usage=TokenUsageBaseModel(),
+                                                             num_llm_calls=1,
+                                                             seconds_between_calls=int(time.time() -
+                                                                                       self.last_call_ts)))
+
+        self.step_manager.push_intermediate_step(stats)
+        self._run_id_to_llm_input[run_id] = messages[0][-1].content
+        self._state = IntermediateStepType.LLM_START
+        self.last_call_ts = time.time()
+        self._run_id_to_start_time[run_id] = time.time()
+
+    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
+        """Collect stats for just the token"""
+        model_name = ""
+        try:
+            model_name = self._run_id_to_model_name.get(str(kwargs.get("run_id", "")), "")
+        except Exception as e:
+            logger.exception("Error getting model name: %s", e, exc_info=True)
+
+        usage_metadata = {}
+        try:
+            usage_metadata = kwargs.get("chunk").message.usage_metadata if kwargs.get("chunk") else {}
+        except Exception as e:
+            logger.exception("Error getting usage metadata: %s", e, exc_info=True)
+
+        stats = IntermediateStepPayload(
+            event_type=IntermediateStepType.LLM_NEW_TOKEN,
+            framework=LLMFrameworkEnum.LANGCHAIN,
+            name=model_name,
+            UUID=str(kwargs.get("run_id", str(uuid4()))),
+            data=StreamEventData(input=self._run_id_to_llm_input.get(str(kwargs.get("run_id", "")), ""), chunk=token),
+            usage_info=UsageInfo(token_usage=self._extract_token_base_model(usage_metadata),
+                                 num_llm_calls=1,
+                                 seconds_between_calls=int(time.time() - self.last_call_ts)),
+            metadata=TraceMetadata(chat_responses=[kwargs.get("chunk")] if kwargs.get("chunk") else []))
+
+        self.step_manager.push_intermediate_step(stats)
+
+    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
+        """Collect token usage."""
+
+        usage_metadata = {}
+
+        model_name = ""
+        try:
+            model_name = response.llm_output["model_name"]
+        except Exception as e:
+            try:
+                model_name = self._run_id_to_model_name.get(str(kwargs.get("run_id", "")), "")
+            except Exception as e_inner:
+                logger.exception("Error getting model name: %s from outer error %s", e_inner, e, exc_info=True)
+
+        try:
+            generation = response.generations[0][0]
+        except IndexError:
+            generation = None
+
+        if isinstance(generation, ChatGeneration):
+            try:
+                message = generation.message
+                if isinstance(message, AIMessage):
+                    usage_metadata = message.usage_metadata
+                else:
+                    usage_metadata = {}
+            except AttributeError:
+                usage_metadata = {}
+
+        llm_text_output = generation.message.content if generation else ""
+
+        # update shared state behind lock
+        with self._lock:
+            usage_stat = IntermediateStepPayload(
+                span_event_timestamp=self._run_id_to_start_time.get(str(kwargs.get("run_id", "")), time.time()),
+                event_type=IntermediateStepType.LLM_END,
+                framework=LLMFrameworkEnum.LANGCHAIN,
+                name=model_name,
+                UUID=str(kwargs.get("run_id", str(uuid4()))),
+                data=StreamEventData(input=self._run_id_to_llm_input.get(str(kwargs.get("run_id", "")), ""),
+                                     output=llm_text_output),
+                usage_info=UsageInfo(token_usage=self._extract_token_base_model(usage_metadata)),
+                metadata=TraceMetadata(chat_responses=[generation] if generation else []))
+
+            self.step_manager.push_intermediate_step(usage_stat)
+
+        self._state = IntermediateStepType.LLM_END
+
+    async def on_tool_start(
+        self,
+        serialized: dict[str, Any],
+        input_str: str,
+        *,
+        run_id: UUID,
+        parent_run_id: UUID | None = None,
+        tags: list[str] | None = None,
+        metadata: dict[str, Any] | None = None,
+        inputs: dict[str, Any] | None = None,
+        **kwargs: Any,
+    ) -> Any:
+
+        stats = IntermediateStepPayload(event_type=IntermediateStepType.TOOL_START,
+                                        framework=LLMFrameworkEnum.LANGCHAIN,
+                                        name=serialized.get("name", ""),
+                                        UUID=str(run_id),
+                                        data=StreamEventData(input=input_str),
+                                        metadata=TraceMetadata(tool_inputs=copy.deepcopy(inputs),
+                                                               tool_info=copy.deepcopy(serialized)),
+                                        usage_info=UsageInfo(token_usage=TokenUsageBaseModel()))
+
+        self.step_manager.push_intermediate_step(stats)
+        self._run_id_to_tool_input[str(run_id)] = input_str
+        self._run_id_to_start_time[str(run_id)] = time.time()
+
+    async def on_tool_end(
+        self,
+        output: Any,
+        *,
+        run_id: UUID,
+        parent_run_id: UUID | None = None,
+        **kwargs: Any,
+    ) -> Any:
+
+        stats = IntermediateStepPayload(event_type=IntermediateStepType.TOOL_END,
+                                        span_event_timestamp=self._run_id_to_start_time.get(str(run_id), time.time()),
+                                        framework=LLMFrameworkEnum.LANGCHAIN,
+                                        name=kwargs.get("name", ""),
+                                        UUID=str(run_id),
+                                        metadata=TraceMetadata(tool_outputs=output),
+                                        usage_info=UsageInfo(token_usage=TokenUsageBaseModel()),
+                                        data=StreamEventData(input=self._run_id_to_tool_input.get(str(run_id), ""),
+                                                             output=output))
+
+        self.step_manager.push_intermediate_step(stats)
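
Because LangchainProfilerHandler subclasses langchain_core's AsyncCallbackHandler, it attaches through LangChain's standard callbacks mechanism. A minimal sketch, assuming an active AIQ context and with `chain` standing in for any LangChain runnable:

    # Hypothetical sketch, not part of the package.
    from aiq.profiler.callbacks.langchain_callback_handler import LangchainProfilerHandler

    handler = LangchainProfilerHandler()
    result = await chain.ainvoke(inputs, config={"callbacks": [handler]})

    # Each run emits LLM_START / LLM_NEW_TOKEN / LLM_END and TOOL_START /
    # TOOL_END intermediate steps keyed by run_id; print(handler) reports
    # the handler's token counters.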