nvidia-nat 1.1.0a20251020__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiq/__init__.py +66 -0
- nat/agent/__init__.py +0 -0
- nat/agent/base.py +265 -0
- nat/agent/dual_node.py +72 -0
- nat/agent/prompt_optimizer/__init__.py +0 -0
- nat/agent/prompt_optimizer/prompt.py +68 -0
- nat/agent/prompt_optimizer/register.py +149 -0
- nat/agent/react_agent/__init__.py +0 -0
- nat/agent/react_agent/agent.py +394 -0
- nat/agent/react_agent/output_parser.py +104 -0
- nat/agent/react_agent/prompt.py +44 -0
- nat/agent/react_agent/register.py +168 -0
- nat/agent/reasoning_agent/__init__.py +0 -0
- nat/agent/reasoning_agent/reasoning_agent.py +227 -0
- nat/agent/register.py +23 -0
- nat/agent/rewoo_agent/__init__.py +0 -0
- nat/agent/rewoo_agent/agent.py +593 -0
- nat/agent/rewoo_agent/prompt.py +107 -0
- nat/agent/rewoo_agent/register.py +175 -0
- nat/agent/tool_calling_agent/__init__.py +0 -0
- nat/agent/tool_calling_agent/agent.py +246 -0
- nat/agent/tool_calling_agent/register.py +129 -0
- nat/authentication/__init__.py +14 -0
- nat/authentication/api_key/__init__.py +14 -0
- nat/authentication/api_key/api_key_auth_provider.py +96 -0
- nat/authentication/api_key/api_key_auth_provider_config.py +124 -0
- nat/authentication/api_key/register.py +26 -0
- nat/authentication/credential_validator/__init__.py +14 -0
- nat/authentication/credential_validator/bearer_token_validator.py +557 -0
- nat/authentication/exceptions/__init__.py +14 -0
- nat/authentication/exceptions/api_key_exceptions.py +38 -0
- nat/authentication/http_basic_auth/__init__.py +0 -0
- nat/authentication/http_basic_auth/http_basic_auth_provider.py +81 -0
- nat/authentication/http_basic_auth/register.py +30 -0
- nat/authentication/interfaces.py +96 -0
- nat/authentication/oauth2/__init__.py +14 -0
- nat/authentication/oauth2/oauth2_auth_code_flow_provider.py +140 -0
- nat/authentication/oauth2/oauth2_auth_code_flow_provider_config.py +39 -0
- nat/authentication/oauth2/oauth2_resource_server_config.py +124 -0
- nat/authentication/oauth2/register.py +25 -0
- nat/authentication/register.py +20 -0
- nat/builder/__init__.py +0 -0
- nat/builder/builder.py +317 -0
- nat/builder/component_utils.py +320 -0
- nat/builder/context.py +321 -0
- nat/builder/embedder.py +24 -0
- nat/builder/eval_builder.py +166 -0
- nat/builder/evaluator.py +29 -0
- nat/builder/framework_enum.py +25 -0
- nat/builder/front_end.py +73 -0
- nat/builder/function.py +714 -0
- nat/builder/function_base.py +380 -0
- nat/builder/function_info.py +625 -0
- nat/builder/intermediate_step_manager.py +206 -0
- nat/builder/llm.py +25 -0
- nat/builder/retriever.py +25 -0
- nat/builder/user_interaction_manager.py +78 -0
- nat/builder/workflow.py +160 -0
- nat/builder/workflow_builder.py +1365 -0
- nat/cli/__init__.py +14 -0
- nat/cli/cli_utils/__init__.py +0 -0
- nat/cli/cli_utils/config_override.py +231 -0
- nat/cli/cli_utils/validation.py +37 -0
- nat/cli/commands/__init__.py +0 -0
- nat/cli/commands/configure/__init__.py +0 -0
- nat/cli/commands/configure/channel/__init__.py +0 -0
- nat/cli/commands/configure/channel/add.py +28 -0
- nat/cli/commands/configure/channel/channel.py +34 -0
- nat/cli/commands/configure/channel/remove.py +30 -0
- nat/cli/commands/configure/channel/update.py +30 -0
- nat/cli/commands/configure/configure.py +33 -0
- nat/cli/commands/evaluate.py +139 -0
- nat/cli/commands/info/__init__.py +14 -0
- nat/cli/commands/info/info.py +47 -0
- nat/cli/commands/info/list_channels.py +32 -0
- nat/cli/commands/info/list_components.py +128 -0
- nat/cli/commands/mcp/__init__.py +14 -0
- nat/cli/commands/mcp/mcp.py +986 -0
- nat/cli/commands/object_store/__init__.py +14 -0
- nat/cli/commands/object_store/object_store.py +227 -0
- nat/cli/commands/optimize.py +90 -0
- nat/cli/commands/registry/__init__.py +14 -0
- nat/cli/commands/registry/publish.py +88 -0
- nat/cli/commands/registry/pull.py +118 -0
- nat/cli/commands/registry/registry.py +36 -0
- nat/cli/commands/registry/remove.py +108 -0
- nat/cli/commands/registry/search.py +153 -0
- nat/cli/commands/sizing/__init__.py +14 -0
- nat/cli/commands/sizing/calc.py +297 -0
- nat/cli/commands/sizing/sizing.py +27 -0
- nat/cli/commands/start.py +257 -0
- nat/cli/commands/uninstall.py +81 -0
- nat/cli/commands/validate.py +47 -0
- nat/cli/commands/workflow/__init__.py +14 -0
- nat/cli/commands/workflow/templates/__init__.py.j2 +0 -0
- nat/cli/commands/workflow/templates/config.yml.j2 +17 -0
- nat/cli/commands/workflow/templates/pyproject.toml.j2 +25 -0
- nat/cli/commands/workflow/templates/register.py.j2 +4 -0
- nat/cli/commands/workflow/templates/workflow.py.j2 +50 -0
- nat/cli/commands/workflow/workflow.py +37 -0
- nat/cli/commands/workflow/workflow_commands.py +403 -0
- nat/cli/entrypoint.py +141 -0
- nat/cli/main.py +60 -0
- nat/cli/register_workflow.py +522 -0
- nat/cli/type_registry.py +1069 -0
- nat/control_flow/__init__.py +0 -0
- nat/control_flow/register.py +20 -0
- nat/control_flow/router_agent/__init__.py +0 -0
- nat/control_flow/router_agent/agent.py +329 -0
- nat/control_flow/router_agent/prompt.py +48 -0
- nat/control_flow/router_agent/register.py +91 -0
- nat/control_flow/sequential_executor.py +166 -0
- nat/data_models/__init__.py +14 -0
- nat/data_models/agent.py +34 -0
- nat/data_models/api_server.py +843 -0
- nat/data_models/authentication.py +245 -0
- nat/data_models/common.py +171 -0
- nat/data_models/component.py +60 -0
- nat/data_models/component_ref.py +179 -0
- nat/data_models/config.py +434 -0
- nat/data_models/dataset_handler.py +169 -0
- nat/data_models/discovery_metadata.py +305 -0
- nat/data_models/embedder.py +27 -0
- nat/data_models/evaluate.py +130 -0
- nat/data_models/evaluator.py +26 -0
- nat/data_models/front_end.py +26 -0
- nat/data_models/function.py +64 -0
- nat/data_models/function_dependencies.py +80 -0
- nat/data_models/gated_field_mixin.py +242 -0
- nat/data_models/interactive.py +246 -0
- nat/data_models/intermediate_step.py +302 -0
- nat/data_models/invocation_node.py +38 -0
- nat/data_models/llm.py +27 -0
- nat/data_models/logging.py +26 -0
- nat/data_models/memory.py +27 -0
- nat/data_models/object_store.py +44 -0
- nat/data_models/optimizable.py +119 -0
- nat/data_models/optimizer.py +149 -0
- nat/data_models/profiler.py +54 -0
- nat/data_models/registry_handler.py +26 -0
- nat/data_models/retriever.py +30 -0
- nat/data_models/retry_mixin.py +35 -0
- nat/data_models/span.py +228 -0
- nat/data_models/step_adaptor.py +64 -0
- nat/data_models/streaming.py +33 -0
- nat/data_models/swe_bench_model.py +54 -0
- nat/data_models/telemetry_exporter.py +26 -0
- nat/data_models/temperature_mixin.py +44 -0
- nat/data_models/thinking_mixin.py +86 -0
- nat/data_models/top_p_mixin.py +44 -0
- nat/data_models/ttc_strategy.py +30 -0
- nat/embedder/__init__.py +0 -0
- nat/embedder/azure_openai_embedder.py +46 -0
- nat/embedder/nim_embedder.py +59 -0
- nat/embedder/openai_embedder.py +42 -0
- nat/embedder/register.py +22 -0
- nat/eval/__init__.py +14 -0
- nat/eval/config.py +62 -0
- nat/eval/dataset_handler/__init__.py +0 -0
- nat/eval/dataset_handler/dataset_downloader.py +106 -0
- nat/eval/dataset_handler/dataset_filter.py +52 -0
- nat/eval/dataset_handler/dataset_handler.py +431 -0
- nat/eval/evaluate.py +565 -0
- nat/eval/evaluator/__init__.py +14 -0
- nat/eval/evaluator/base_evaluator.py +77 -0
- nat/eval/evaluator/evaluator_model.py +58 -0
- nat/eval/intermediate_step_adapter.py +99 -0
- nat/eval/rag_evaluator/__init__.py +0 -0
- nat/eval/rag_evaluator/evaluate.py +178 -0
- nat/eval/rag_evaluator/register.py +143 -0
- nat/eval/register.py +26 -0
- nat/eval/remote_workflow.py +133 -0
- nat/eval/runners/__init__.py +14 -0
- nat/eval/runners/config.py +39 -0
- nat/eval/runners/multi_eval_runner.py +54 -0
- nat/eval/runtime_evaluator/__init__.py +14 -0
- nat/eval/runtime_evaluator/evaluate.py +123 -0
- nat/eval/runtime_evaluator/register.py +100 -0
- nat/eval/runtime_event_subscriber.py +52 -0
- nat/eval/swe_bench_evaluator/__init__.py +0 -0
- nat/eval/swe_bench_evaluator/evaluate.py +215 -0
- nat/eval/swe_bench_evaluator/register.py +36 -0
- nat/eval/trajectory_evaluator/__init__.py +0 -0
- nat/eval/trajectory_evaluator/evaluate.py +75 -0
- nat/eval/trajectory_evaluator/register.py +40 -0
- nat/eval/tunable_rag_evaluator/__init__.py +0 -0
- nat/eval/tunable_rag_evaluator/evaluate.py +242 -0
- nat/eval/tunable_rag_evaluator/register.py +52 -0
- nat/eval/usage_stats.py +41 -0
- nat/eval/utils/__init__.py +0 -0
- nat/eval/utils/eval_trace_ctx.py +89 -0
- nat/eval/utils/output_uploader.py +140 -0
- nat/eval/utils/tqdm_position_registry.py +40 -0
- nat/eval/utils/weave_eval.py +193 -0
- nat/experimental/__init__.py +0 -0
- nat/experimental/decorators/__init__.py +0 -0
- nat/experimental/decorators/experimental_warning_decorator.py +154 -0
- nat/experimental/test_time_compute/__init__.py +0 -0
- nat/experimental/test_time_compute/editing/__init__.py +0 -0
- nat/experimental/test_time_compute/editing/iterative_plan_refinement_editor.py +147 -0
- nat/experimental/test_time_compute/editing/llm_as_a_judge_editor.py +204 -0
- nat/experimental/test_time_compute/editing/motivation_aware_summarization.py +107 -0
- nat/experimental/test_time_compute/functions/__init__.py +0 -0
- nat/experimental/test_time_compute/functions/execute_score_select_function.py +105 -0
- nat/experimental/test_time_compute/functions/plan_select_execute_function.py +228 -0
- nat/experimental/test_time_compute/functions/ttc_tool_orchestration_function.py +205 -0
- nat/experimental/test_time_compute/functions/ttc_tool_wrapper_function.py +146 -0
- nat/experimental/test_time_compute/models/__init__.py +0 -0
- nat/experimental/test_time_compute/models/editor_config.py +132 -0
- nat/experimental/test_time_compute/models/scoring_config.py +112 -0
- nat/experimental/test_time_compute/models/search_config.py +120 -0
- nat/experimental/test_time_compute/models/selection_config.py +154 -0
- nat/experimental/test_time_compute/models/stage_enums.py +43 -0
- nat/experimental/test_time_compute/models/strategy_base.py +67 -0
- nat/experimental/test_time_compute/models/tool_use_config.py +41 -0
- nat/experimental/test_time_compute/models/ttc_item.py +48 -0
- nat/experimental/test_time_compute/register.py +35 -0
- nat/experimental/test_time_compute/scoring/__init__.py +0 -0
- nat/experimental/test_time_compute/scoring/llm_based_agent_scorer.py +168 -0
- nat/experimental/test_time_compute/scoring/llm_based_plan_scorer.py +168 -0
- nat/experimental/test_time_compute/scoring/motivation_aware_scorer.py +111 -0
- nat/experimental/test_time_compute/search/__init__.py +0 -0
- nat/experimental/test_time_compute/search/multi_llm_planner.py +128 -0
- nat/experimental/test_time_compute/search/multi_query_retrieval_search.py +122 -0
- nat/experimental/test_time_compute/search/single_shot_multi_plan_planner.py +128 -0
- nat/experimental/test_time_compute/selection/__init__.py +0 -0
- nat/experimental/test_time_compute/selection/best_of_n_selector.py +63 -0
- nat/experimental/test_time_compute/selection/llm_based_agent_output_selector.py +131 -0
- nat/experimental/test_time_compute/selection/llm_based_output_merging_selector.py +157 -0
- nat/experimental/test_time_compute/selection/llm_based_plan_selector.py +128 -0
- nat/experimental/test_time_compute/selection/threshold_selector.py +58 -0
- nat/front_ends/__init__.py +14 -0
- nat/front_ends/console/__init__.py +14 -0
- nat/front_ends/console/authentication_flow_handler.py +285 -0
- nat/front_ends/console/console_front_end_config.py +32 -0
- nat/front_ends/console/console_front_end_plugin.py +108 -0
- nat/front_ends/console/register.py +25 -0
- nat/front_ends/cron/__init__.py +14 -0
- nat/front_ends/fastapi/__init__.py +14 -0
- nat/front_ends/fastapi/auth_flow_handlers/__init__.py +0 -0
- nat/front_ends/fastapi/auth_flow_handlers/http_flow_handler.py +27 -0
- nat/front_ends/fastapi/auth_flow_handlers/websocket_flow_handler.py +142 -0
- nat/front_ends/fastapi/dask_client_mixin.py +65 -0
- nat/front_ends/fastapi/fastapi_front_end_config.py +272 -0
- nat/front_ends/fastapi/fastapi_front_end_controller.py +68 -0
- nat/front_ends/fastapi/fastapi_front_end_plugin.py +247 -0
- nat/front_ends/fastapi/fastapi_front_end_plugin_worker.py +1257 -0
- nat/front_ends/fastapi/html_snippets/__init__.py +14 -0
- nat/front_ends/fastapi/html_snippets/auth_code_grant_success.py +35 -0
- nat/front_ends/fastapi/intermediate_steps_subscriber.py +80 -0
- nat/front_ends/fastapi/job_store.py +602 -0
- nat/front_ends/fastapi/main.py +64 -0
- nat/front_ends/fastapi/message_handler.py +344 -0
- nat/front_ends/fastapi/message_validator.py +351 -0
- nat/front_ends/fastapi/register.py +25 -0
- nat/front_ends/fastapi/response_helpers.py +195 -0
- nat/front_ends/fastapi/step_adaptor.py +319 -0
- nat/front_ends/fastapi/utils.py +57 -0
- nat/front_ends/mcp/__init__.py +14 -0
- nat/front_ends/mcp/introspection_token_verifier.py +73 -0
- nat/front_ends/mcp/mcp_front_end_config.py +90 -0
- nat/front_ends/mcp/mcp_front_end_plugin.py +113 -0
- nat/front_ends/mcp/mcp_front_end_plugin_worker.py +268 -0
- nat/front_ends/mcp/memory_profiler.py +320 -0
- nat/front_ends/mcp/register.py +27 -0
- nat/front_ends/mcp/tool_converter.py +290 -0
- nat/front_ends/register.py +21 -0
- nat/front_ends/simple_base/__init__.py +14 -0
- nat/front_ends/simple_base/simple_front_end_plugin_base.py +56 -0
- nat/llm/__init__.py +0 -0
- nat/llm/aws_bedrock_llm.py +69 -0
- nat/llm/azure_openai_llm.py +57 -0
- nat/llm/litellm_llm.py +69 -0
- nat/llm/nim_llm.py +58 -0
- nat/llm/openai_llm.py +54 -0
- nat/llm/register.py +27 -0
- nat/llm/utils/__init__.py +14 -0
- nat/llm/utils/env_config_value.py +93 -0
- nat/llm/utils/error.py +17 -0
- nat/llm/utils/thinking.py +215 -0
- nat/memory/__init__.py +20 -0
- nat/memory/interfaces.py +183 -0
- nat/memory/models.py +112 -0
- nat/meta/pypi.md +58 -0
- nat/object_store/__init__.py +20 -0
- nat/object_store/in_memory_object_store.py +76 -0
- nat/object_store/interfaces.py +84 -0
- nat/object_store/models.py +38 -0
- nat/object_store/register.py +19 -0
- nat/observability/__init__.py +14 -0
- nat/observability/exporter/__init__.py +14 -0
- nat/observability/exporter/base_exporter.py +449 -0
- nat/observability/exporter/exporter.py +78 -0
- nat/observability/exporter/file_exporter.py +33 -0
- nat/observability/exporter/processing_exporter.py +550 -0
- nat/observability/exporter/raw_exporter.py +52 -0
- nat/observability/exporter/span_exporter.py +308 -0
- nat/observability/exporter_manager.py +335 -0
- nat/observability/mixin/__init__.py +14 -0
- nat/observability/mixin/batch_config_mixin.py +26 -0
- nat/observability/mixin/collector_config_mixin.py +23 -0
- nat/observability/mixin/file_mixin.py +288 -0
- nat/observability/mixin/file_mode.py +23 -0
- nat/observability/mixin/redaction_config_mixin.py +42 -0
- nat/observability/mixin/resource_conflict_mixin.py +134 -0
- nat/observability/mixin/serialize_mixin.py +61 -0
- nat/observability/mixin/tagging_config_mixin.py +62 -0
- nat/observability/mixin/type_introspection_mixin.py +496 -0
- nat/observability/processor/__init__.py +14 -0
- nat/observability/processor/batching_processor.py +308 -0
- nat/observability/processor/callback_processor.py +42 -0
- nat/observability/processor/falsy_batch_filter_processor.py +55 -0
- nat/observability/processor/intermediate_step_serializer.py +28 -0
- nat/observability/processor/processor.py +74 -0
- nat/observability/processor/processor_factory.py +70 -0
- nat/observability/processor/redaction/__init__.py +24 -0
- nat/observability/processor/redaction/contextual_redaction_processor.py +125 -0
- nat/observability/processor/redaction/contextual_span_redaction_processor.py +66 -0
- nat/observability/processor/redaction/redaction_processor.py +177 -0
- nat/observability/processor/redaction/span_header_redaction_processor.py +92 -0
- nat/observability/processor/span_tagging_processor.py +68 -0
- nat/observability/register.py +114 -0
- nat/observability/utils/__init__.py +14 -0
- nat/observability/utils/dict_utils.py +236 -0
- nat/observability/utils/time_utils.py +31 -0
- nat/plugins/.namespace +1 -0
- nat/profiler/__init__.py +0 -0
- nat/profiler/calc/__init__.py +14 -0
- nat/profiler/calc/calc_runner.py +626 -0
- nat/profiler/calc/calculations.py +288 -0
- nat/profiler/calc/data_models.py +188 -0
- nat/profiler/calc/plot.py +345 -0
- nat/profiler/callbacks/__init__.py +0 -0
- nat/profiler/callbacks/agno_callback_handler.py +295 -0
- nat/profiler/callbacks/base_callback_class.py +20 -0
- nat/profiler/callbacks/langchain_callback_handler.py +297 -0
- nat/profiler/callbacks/llama_index_callback_handler.py +205 -0
- nat/profiler/callbacks/semantic_kernel_callback_handler.py +238 -0
- nat/profiler/callbacks/token_usage_base_model.py +27 -0
- nat/profiler/data_frame_row.py +51 -0
- nat/profiler/data_models.py +24 -0
- nat/profiler/decorators/__init__.py +0 -0
- nat/profiler/decorators/framework_wrapper.py +180 -0
- nat/profiler/decorators/function_tracking.py +411 -0
- nat/profiler/forecasting/__init__.py +0 -0
- nat/profiler/forecasting/config.py +18 -0
- nat/profiler/forecasting/model_trainer.py +75 -0
- nat/profiler/forecasting/models/__init__.py +22 -0
- nat/profiler/forecasting/models/forecasting_base_model.py +42 -0
- nat/profiler/forecasting/models/linear_model.py +197 -0
- nat/profiler/forecasting/models/random_forest_regressor.py +269 -0
- nat/profiler/inference_metrics_model.py +28 -0
- nat/profiler/inference_optimization/__init__.py +0 -0
- nat/profiler/inference_optimization/bottleneck_analysis/__init__.py +0 -0
- nat/profiler/inference_optimization/bottleneck_analysis/nested_stack_analysis.py +460 -0
- nat/profiler/inference_optimization/bottleneck_analysis/simple_stack_analysis.py +258 -0
- nat/profiler/inference_optimization/data_models.py +386 -0
- nat/profiler/inference_optimization/experimental/__init__.py +0 -0
- nat/profiler/inference_optimization/experimental/concurrency_spike_analysis.py +468 -0
- nat/profiler/inference_optimization/experimental/prefix_span_analysis.py +404 -0
- nat/profiler/inference_optimization/llm_metrics.py +212 -0
- nat/profiler/inference_optimization/prompt_caching.py +163 -0
- nat/profiler/inference_optimization/token_uniqueness.py +107 -0
- nat/profiler/inference_optimization/workflow_runtimes.py +72 -0
- nat/profiler/intermediate_property_adapter.py +102 -0
- nat/profiler/parameter_optimization/__init__.py +0 -0
- nat/profiler/parameter_optimization/optimizable_utils.py +93 -0
- nat/profiler/parameter_optimization/optimizer_runtime.py +67 -0
- nat/profiler/parameter_optimization/parameter_optimizer.py +153 -0
- nat/profiler/parameter_optimization/parameter_selection.py +107 -0
- nat/profiler/parameter_optimization/pareto_visualizer.py +380 -0
- nat/profiler/parameter_optimization/prompt_optimizer.py +384 -0
- nat/profiler/parameter_optimization/update_helpers.py +66 -0
- nat/profiler/profile_runner.py +478 -0
- nat/profiler/utils.py +186 -0
- nat/registry_handlers/__init__.py +0 -0
- nat/registry_handlers/local/__init__.py +0 -0
- nat/registry_handlers/local/local_handler.py +176 -0
- nat/registry_handlers/local/register_local.py +37 -0
- nat/registry_handlers/metadata_factory.py +60 -0
- nat/registry_handlers/package_utils.py +570 -0
- nat/registry_handlers/pypi/__init__.py +0 -0
- nat/registry_handlers/pypi/pypi_handler.py +248 -0
- nat/registry_handlers/pypi/register_pypi.py +40 -0
- nat/registry_handlers/register.py +20 -0
- nat/registry_handlers/registry_handler_base.py +157 -0
- nat/registry_handlers/rest/__init__.py +0 -0
- nat/registry_handlers/rest/register_rest.py +56 -0
- nat/registry_handlers/rest/rest_handler.py +236 -0
- nat/registry_handlers/schemas/__init__.py +0 -0
- nat/registry_handlers/schemas/headers.py +42 -0
- nat/registry_handlers/schemas/package.py +68 -0
- nat/registry_handlers/schemas/publish.py +68 -0
- nat/registry_handlers/schemas/pull.py +82 -0
- nat/registry_handlers/schemas/remove.py +36 -0
- nat/registry_handlers/schemas/search.py +91 -0
- nat/registry_handlers/schemas/status.py +47 -0
- nat/retriever/__init__.py +0 -0
- nat/retriever/interface.py +41 -0
- nat/retriever/milvus/__init__.py +14 -0
- nat/retriever/milvus/register.py +81 -0
- nat/retriever/milvus/retriever.py +228 -0
- nat/retriever/models.py +77 -0
- nat/retriever/nemo_retriever/__init__.py +14 -0
- nat/retriever/nemo_retriever/register.py +60 -0
- nat/retriever/nemo_retriever/retriever.py +190 -0
- nat/retriever/register.py +21 -0
- nat/runtime/__init__.py +14 -0
- nat/runtime/loader.py +220 -0
- nat/runtime/runner.py +292 -0
- nat/runtime/session.py +223 -0
- nat/runtime/user_metadata.py +130 -0
- nat/settings/__init__.py +0 -0
- nat/settings/global_settings.py +329 -0
- nat/test/.namespace +1 -0
- nat/tool/__init__.py +0 -0
- nat/tool/chat_completion.py +77 -0
- nat/tool/code_execution/README.md +151 -0
- nat/tool/code_execution/__init__.py +0 -0
- nat/tool/code_execution/code_sandbox.py +267 -0
- nat/tool/code_execution/local_sandbox/.gitignore +1 -0
- nat/tool/code_execution/local_sandbox/Dockerfile.sandbox +60 -0
- nat/tool/code_execution/local_sandbox/__init__.py +13 -0
- nat/tool/code_execution/local_sandbox/local_sandbox_server.py +198 -0
- nat/tool/code_execution/local_sandbox/sandbox.requirements.txt +6 -0
- nat/tool/code_execution/local_sandbox/start_local_sandbox.sh +50 -0
- nat/tool/code_execution/register.py +74 -0
- nat/tool/code_execution/test_code_execution_sandbox.py +414 -0
- nat/tool/code_execution/utils.py +100 -0
- nat/tool/datetime_tools.py +82 -0
- nat/tool/document_search.py +141 -0
- nat/tool/github_tools.py +450 -0
- nat/tool/memory_tools/__init__.py +0 -0
- nat/tool/memory_tools/add_memory_tool.py +79 -0
- nat/tool/memory_tools/delete_memory_tool.py +66 -0
- nat/tool/memory_tools/get_memory_tool.py +72 -0
- nat/tool/nvidia_rag.py +95 -0
- nat/tool/register.py +31 -0
- nat/tool/retriever.py +95 -0
- nat/tool/server_tools.py +66 -0
- nat/utils/__init__.py +0 -0
- nat/utils/callable_utils.py +70 -0
- nat/utils/data_models/__init__.py +0 -0
- nat/utils/data_models/schema_validator.py +58 -0
- nat/utils/debugging_utils.py +43 -0
- nat/utils/decorators.py +210 -0
- nat/utils/dump_distro_mapping.py +32 -0
- nat/utils/exception_handlers/__init__.py +0 -0
- nat/utils/exception_handlers/automatic_retries.py +342 -0
- nat/utils/exception_handlers/schemas.py +114 -0
- nat/utils/io/__init__.py +0 -0
- nat/utils/io/model_processing.py +28 -0
- nat/utils/io/yaml_tools.py +119 -0
- nat/utils/log_levels.py +25 -0
- nat/utils/log_utils.py +37 -0
- nat/utils/metadata_utils.py +74 -0
- nat/utils/optional_imports.py +142 -0
- nat/utils/producer_consumer_queue.py +178 -0
- nat/utils/reactive/__init__.py +0 -0
- nat/utils/reactive/base/__init__.py +0 -0
- nat/utils/reactive/base/observable_base.py +65 -0
- nat/utils/reactive/base/observer_base.py +55 -0
- nat/utils/reactive/base/subject_base.py +79 -0
- nat/utils/reactive/observable.py +59 -0
- nat/utils/reactive/observer.py +76 -0
- nat/utils/reactive/subject.py +131 -0
- nat/utils/reactive/subscription.py +49 -0
- nat/utils/settings/__init__.py +0 -0
- nat/utils/settings/global_settings.py +195 -0
- nat/utils/string_utils.py +38 -0
- nat/utils/type_converter.py +299 -0
- nat/utils/type_utils.py +488 -0
- nat/utils/url_utils.py +27 -0
- nvidia_nat-1.1.0a20251020.dist-info/METADATA +195 -0
- nvidia_nat-1.1.0a20251020.dist-info/RECORD +480 -0
- nvidia_nat-1.1.0a20251020.dist-info/WHEEL +5 -0
- nvidia_nat-1.1.0a20251020.dist-info/entry_points.txt +22 -0
- nvidia_nat-1.1.0a20251020.dist-info/licenses/LICENSE-3rd-party.txt +5478 -0
- nvidia_nat-1.1.0a20251020.dist-info/licenses/LICENSE.md +201 -0
- nvidia_nat-1.1.0a20251020.dist-info/top_level.txt +2 -0
nat/eval/trajectory_evaluator/evaluate.py
ADDED
@@ -0,0 +1,75 @@
# SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from langchain.evaluation import TrajectoryEvalChain
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool

from nat.eval.evaluator.base_evaluator import BaseEvaluator
from nat.eval.evaluator.evaluator_model import EvalInputItem
from nat.eval.evaluator.evaluator_model import EvalOutputItem

logger = logging.getLogger(__name__)


class TrajectoryEvaluator(BaseEvaluator):

    def __init__(
        self,
        llm: BaseChatModel,
        tools: list[BaseTool] | None = None,
        max_concurrency: int = 8,
    ):
        super().__init__(max_concurrency=max_concurrency, tqdm_desc="Evaluating Trajectory")
        self.llm = llm
        self.tools = tools
        # Initialize trajectory evaluation chain
        self.traj_eval_chain = TrajectoryEvalChain.from_llm(llm=self.llm,
                                                            tools=self.tools,
                                                            return_reasoning=True,
                                                            requires_reference=True)
        logger.debug("Trajectory evaluation chain initialized.")

    async def evaluate_item(self, item: EvalInputItem) -> EvalOutputItem:
        """
        Evaluate a single EvalInputItem and return an EvalOutputItem.
        """
        from nat.data_models.intermediate_step import IntermediateStepType
        from nat.eval.intermediate_step_adapter import IntermediateStepAdapter

        intermediate_step_adapter = IntermediateStepAdapter()
        event_filter = [IntermediateStepType.LLM_END, IntermediateStepType.TOOL_END]

        question = item.input_obj
        generated_answer = item.output_obj
        agent_trajectory = intermediate_step_adapter.get_agent_actions(item.trajectory, event_filter)

        try:
            eval_result = await self.traj_eval_chain.aevaluate_agent_trajectory(
                input=question,
                agent_trajectory=agent_trajectory,
                prediction=generated_answer,
            )
        except Exception as e:
            logger.exception("Error evaluating trajectory for question: %s, Error: %s", question, e)
            return EvalOutputItem(id=item.id, score=0.0, reasoning=f"Error evaluating trajectory: {e}")

        reasoning = {
            "reasoning": eval_result["reasoning"],
            "trajectory": [(action.model_dump(), output) for (action, output) in agent_trajectory]
        }
        return EvalOutputItem(id=item.id, score=eval_result["score"], reasoning=reasoning)
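
For orientation, a minimal sketch of driving this evaluator directly, outside the `nat eval` harness (this sketch is not part of the package). `ChatOpenAI` and the `score_one` helper are illustrative assumptions; any LangChain `BaseChatModel` satisfies the `llm` parameter:

# Illustrative sketch: score one pre-built EvalInputItem with TrajectoryEvaluator.
# ChatOpenAI is an assumption; any LangChain BaseChatModel works here.
from langchain_openai import ChatOpenAI

from nat.eval.trajectory_evaluator.evaluate import TrajectoryEvaluator


async def score_one(item):  # item: an EvalInputItem produced by the eval harness
    llm = ChatOpenAI(model="gpt-4o-mini")
    evaluator = TrajectoryEvaluator(llm=llm, tools=None, max_concurrency=4)
    output_item = await evaluator.evaluate_item(item)
    return output_item.score, output_item.reasoning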

nat/eval/trajectory_evaluator/register.py
ADDED
@@ -0,0 +1,40 @@
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pydantic import Field

from nat.builder.builder import EvalBuilder
from nat.builder.evaluator import EvaluatorInfo
from nat.cli.register_workflow import register_evaluator
from nat.data_models.evaluator import EvaluatorBaseConfig


class TrajectoryEvaluatorConfig(EvaluatorBaseConfig, name="trajectory"):
    """Agent Trajectory Evaluation."""

    llm_name: str = Field(description="LLM as a judge.")


@register_evaluator(config_type=TrajectoryEvaluatorConfig)
async def register_trajectory_evaluator(config: TrajectoryEvaluatorConfig, builder: EvalBuilder):
    from nat.builder.framework_enum import LLMFrameworkEnum

    from .evaluate import TrajectoryEvaluator
    llm = await builder.get_llm(config.llm_name, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
    tools = await builder.get_all_tools(wrapper_type=LLMFrameworkEnum.LANGCHAIN)

    _evaluator = TrajectoryEvaluator(llm, tools, builder.get_max_concurrency())

    yield EvaluatorInfo(config=config, evaluate_fn=_evaluator.evaluate, description="Trajectory Evaluator")
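
Registration uses the toolkit's async-generator pattern: the decorated function yields a single `EvaluatorInfo` and the builder manages its lifetime. Once this module is imported, the evaluator is addressable from an eval config by its registered name, `trajectory`. A hedged sketch of that wiring follows; the YAML layout and `_type` key are assumptions, and only the name `trajectory` and the `llm_name` field come from the code above:

# Hypothetical eval-config fragment, shown as a comment to avoid asserting the
# exact schema:
#
#   eval:
#     evaluators:
#       trajectory_eval:
#         _type: trajectory
#         llm_name: eval_llm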

nat/eval/tunable_rag_evaluator/__init__.py
File without changes

nat/eval/tunable_rag_evaluator/evaluate.py
ADDED
@@ -0,0 +1,242 @@
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from collections.abc import Callable

from langchain.output_parsers import ResponseSchema
from langchain.output_parsers import StructuredOutputParser
from langchain.schema import HumanMessage
from langchain.schema import SystemMessage
from langchain_core.language_models import BaseChatModel
from langchain_core.runnables import RunnableLambda

from nat.eval.evaluator.base_evaluator import BaseEvaluator
from nat.eval.evaluator.evaluator_model import EvalInputItem
from nat.eval.evaluator.evaluator_model import EvalOutputItem

logger = logging.getLogger(__name__)

# flake8: noqa: E501


def evaluation_prompt(judge_llm_prompt: str,
                      question: str,
                      answer_description: str,
                      generated_answer: str,
                      format_instructions: str,
                      default_scoring: bool):
    """
    This function generates a prompt for the judge LLM to evaluate the generated answer.
    """

    DEFAULT_SCORING_INSTRUCTIONS = """
    The coverage score is a measure of how well the generated answer covers the critical aspects mentioned in the expected answer. A low coverage score indicates that the generated answer misses critical aspects of the expected answer. A middle coverage score indicates that the generated answer covers some of the must-haves of the expected answer but lacks other details. A high coverage score indicates that all of the expected aspects are present in the generated answer.
    The correctness score is a measure of how well the generated answer matches the expected answer. A low correctness score indicates that the generated answer is incorrect or does not match the expected answer. A middle correctness score indicates that the generated answer is correct but lacks some details. A high correctness score indicates that the generated answer is exactly the same as the expected answer.
    The relevance score is a measure of how well the generated answer is relevant to the question. A low relevance score indicates that the generated answer is not relevant to the question. A middle relevance score indicates that the generated answer is somewhat relevant to the question. A high relevance score indicates that the generated answer is exactly relevant to the question.
    The reasoning is a 1-2 sentence explanation for the scoring.
    """

    DEFAULT_EVAL_PROMPT = (f"You are an intelligent assistant that responds strictly in JSON format."
                           f"Judge based on the following scoring rubric: {DEFAULT_SCORING_INSTRUCTIONS}"
                           f"{judge_llm_prompt}\n"
                           f"{format_instructions}\n"
                           f"Here is the user's query: {question}"
                           f"Here is the description of the expected answer: {answer_description}"
                           f"Here is the generated answer: {generated_answer}")

    EVAL_PROMPT = (f"You are an intelligent assistant that responds strictly in JSON format. {judge_llm_prompt}\n"
                   f"{format_instructions}\n"
                   f"Here is the user's query: {question}"
                   f"Here is the description of the expected answer: {answer_description}"
                   f"Here is the generated answer: {generated_answer}")

    return EVAL_PROMPT if not default_scoring else DEFAULT_EVAL_PROMPT


def runnable_with_retries(original_fn: Callable, llm_retry_control_params: dict | None = None):
    runnable = RunnableLambda(original_fn)

    if llm_retry_control_params is None:
        llm_retry_control_params = {
            "stop_after_attempt": 3, "initial_backoff_delay_seconds": 1, "has_exponential_jitter": True
        }

    if llm_retry_control_params["has_exponential_jitter"] is None:
        llm_retry_control_params["has_exponential_jitter"] = True
    if llm_retry_control_params["stop_after_attempt"] is None:
        llm_retry_control_params["stop_after_attempt"] = 3
    if llm_retry_control_params["initial_backoff_delay_seconds"] is None:
        llm_retry_control_params["initial_backoff_delay_seconds"] = 1

    # Add retry logic with exponential backoff and jitter
    return runnable.with_retry(
        retry_if_exception_type=(Exception, ),  # Retry on any error
        wait_exponential_jitter=llm_retry_control_params["has_exponential_jitter"],  # Add jitter to exponential backoff
        stop_after_attempt=llm_retry_control_params["stop_after_attempt"],
        exponential_jitter_params={"initial": llm_retry_control_params["initial_backoff_delay_seconds"]
                                   }  # Optional: set initial backoff (seconds)
    )


class TunableRagEvaluator(BaseEvaluator):
    '''Tunable RAG evaluator class with customizable LLM prompt for scoring.'''

    def __init__(self,
                 llm: BaseChatModel,
                 judge_llm_prompt: str,
                 llm_retry_control_params: dict | None,
                 max_concurrency: int,
                 default_scoring: bool,
                 default_score_weights: dict):
        super().__init__(max_concurrency=max_concurrency, tqdm_desc="Evaluating RAG")
        self.llm = llm
        self.judge_llm_prompt = judge_llm_prompt
        self.llm_retry_control_params = llm_retry_control_params
        self.default_scoring = default_scoring
        # Use user-provided weights if available; otherwise, set equal weights for each score
        self.default_score_weights = default_score_weights if default_score_weights else {
            "coverage": 1 / 3, "correctness": 1 / 3, "relevance": 1 / 3
        }

    async def evaluate_item(self, item: EvalInputItem) -> EvalOutputItem:
        """Compute RAG evaluation for an individual item and return EvalOutputItem"""
        question = item.input_obj
        answer_description = item.expected_output_obj
        generated_answer = item.output_obj

        # Call judge LLM to generate score
        score = 0.0

        default_evaluation_schema = [
            ResponseSchema(
                name="coverage_score",
                description="Score for the coverage of all critical aspects mentioned in the expected answer. Ex. 0.5",
                type="float"),
            ResponseSchema(
                name="correctness_score",
                description="Score for the accuracy of the generated answer compared to the expected answer. Ex. 0.5",
                type="float"),
            ResponseSchema(name="relevance_score",
                           description="Score for the relevance of the generated answer to the question. Ex. 0.5",
                           type="float"),
            ResponseSchema(
                name="reasoning",
                description=
                "1-2 summarized sentences of reasoning for the scores. Ex. 'The generated answer covers all critical aspects mentioned in the expected answer, is correct, and is relevant to the question.'",
                type="string"),
        ]

        custom_evaluation_schema = [
            ResponseSchema(name="score", description="Score for the generated answer. Ex. 0.5", type="float"),
            ResponseSchema(
                name="reasoning",
                description=
                "1-2 sentence reasoning for the score. Ex. 'The generated answer is exactly the same as the description of the expected answer.'",
                type="string"),
        ]

        if self.default_scoring:
            evaluation_schema = default_evaluation_schema
        else:
            evaluation_schema = custom_evaluation_schema

        llm_input_response_parser = StructuredOutputParser.from_response_schemas(evaluation_schema)
        format_instructions = llm_input_response_parser.get_format_instructions()

        eval_prompt = evaluation_prompt(judge_llm_prompt=self.judge_llm_prompt,
                                        question=question,
                                        answer_description=answer_description,
                                        generated_answer=generated_answer,
                                        format_instructions=format_instructions,
                                        default_scoring=self.default_scoring)

        messages = [SystemMessage(content="You must respond only in JSON format."), HumanMessage(content=eval_prompt)]

        response = await runnable_with_retries(self.llm.ainvoke, self.llm_retry_control_params).ainvoke(messages)

        # Initialize default values to handle service errors
        coverage_score = 0.0
        correctness_score = 0.0
        relevance_score = 0.0
        reasoning = "Error in evaluator from parsing judge LLM response."

        try:
            parsed_response = llm_input_response_parser.parse(response.content)
            if self.default_scoring:
                try:
                    coverage_score = parsed_response["coverage_score"]
                    correctness_score = parsed_response["correctness_score"]
                    relevance_score = parsed_response["relevance_score"]
                    reasoning = parsed_response["reasoning"]
                except KeyError as e:
                    logger.exception("Missing required keys in default scoring response: %s",
                                     ", ".join(str(arg) for arg in e.args))
                    reasoning = f"Error in evaluator from parsing judge LLM response. Missing required key(s): {', '.join(str(arg) for arg in e.args)}"

                coverage_weight = self.default_score_weights.get("coverage", 1 / 3)
                correctness_weight = self.default_score_weights.get("correctness", 1 / 3)
                relevance_weight = self.default_score_weights.get("relevance", 1 / 3)

                # Calculate score
                total_weight = coverage_weight + correctness_weight + relevance_weight
                coverage_weight = coverage_weight / total_weight
                correctness_weight = correctness_weight / total_weight
                relevance_weight = relevance_weight / total_weight

                if round(coverage_weight + correctness_weight + relevance_weight, 2) != 1:
                    logger.warning("The sum of the default score weights is not 1. The weights will be normalized.")
                    coverage_weight = coverage_weight / (coverage_weight + correctness_weight + relevance_weight)
                    correctness_weight = correctness_weight / (coverage_weight + correctness_weight + relevance_weight)
                    relevance_weight = relevance_weight / (coverage_weight + correctness_weight + relevance_weight)

                score = (coverage_weight * coverage_score + correctness_weight * correctness_score +
                         relevance_weight * relevance_score)

            else:
                try:
                    score = parsed_response["score"]
                    reasoning = parsed_response["reasoning"]
                except KeyError as e:
                    logger.error("Missing required keys in custom scoring response: %s",
                                 ", ".join(str(arg) for arg in e.args))
                    reasoning = f"Error in evaluator from parsing judge LLM response. Missing required key(s): {', '.join(str(arg) for arg in e.args)}"
                    raise
        except (KeyError, ValueError) as e:
            logger.exception("Error parsing judge LLM response: %s", e)
            score = 0.0
            reasoning = "Error in evaluator from parsing judge LLM response."

        if self.default_scoring:
            reasoning = {
                "question": question,
                "answer_description": answer_description,
                "generated_answer": generated_answer,
                "score_breakdown": {
                    "coverage_score": coverage_score,
                    "correctness_score": correctness_score,
                    "relevance_score": relevance_score,
                },
                "reasoning": reasoning,
            }
        else:
            reasoning = {
                "question": question,
                "answer_description": answer_description,
                "generated_answer": generated_answer,
                "reasoning": reasoning
            }

        return EvalOutputItem(id=item.id, score=score, reasoning=reasoning)
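
The default-scoring path reduces to a weighted average of the three sub-scores after weight normalization. A self-contained numeric example of that arithmetic (the weight and score values here are invented for illustration):

# Standalone check of the default-scoring combination above; no NAT imports needed.
weights = {"coverage": 0.5, "correctness": 0.3, "relevance": 0.2}
scores = {"coverage": 0.8, "correctness": 1.0, "relevance": 0.6}

total_weight = sum(weights.values())                   # 1.0 here, so normalization is a no-op
norm = {k: w / total_weight for k, w in weights.items()}
score = sum(norm[k] * scores[k] for k in weights)      # 0.5*0.8 + 0.3*1.0 + 0.2*0.6
print(round(score, 2))                                 # 0.82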

nat/eval/tunable_rag_evaluator/register.py
ADDED
@@ -0,0 +1,52 @@
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pydantic import Field

from nat.builder.builder import EvalBuilder
from nat.builder.evaluator import EvaluatorInfo
from nat.builder.framework_enum import LLMFrameworkEnum
from nat.cli.register_workflow import register_evaluator
from nat.data_models.component_ref import LLMRef
from nat.data_models.evaluator import EvaluatorBaseConfig


class TunableRagEvaluatorConfig(EvaluatorBaseConfig, name="tunable_rag_evaluator"):
    '''Configuration for tunable RAG evaluator'''
    llm_name: LLMRef = Field(description="Name of the judge LLM")
    llm_retry_control_params: dict | None = Field(description="Parameters to control LLM retry behavior", default=None)
    judge_llm_prompt: str = Field(description="LLM prompt for the judge LLM")
    default_scoring: bool = Field(description="Whether to use default scoring", default=False)
    default_score_weights: dict = Field(
        default={
            "coverage": 0.5, "correctness": 0.3, "relevance": 0.2
        },
        description="Weights for the different scoring components when using default scoring")


@register_evaluator(config_type=TunableRagEvaluatorConfig)
async def register_tunable_rag_evaluator(config: TunableRagEvaluatorConfig, builder: EvalBuilder):
    '''Register tunable RAG evaluator'''
    from .evaluate import TunableRagEvaluator

    llm = await builder.get_llm(config.llm_name, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
    evaluator = TunableRagEvaluator(llm,
                                    config.judge_llm_prompt,
                                    config.llm_retry_control_params,
                                    builder.get_max_concurrency(),
                                    config.default_scoring,
                                    config.default_score_weights)

    yield EvaluatorInfo(config=config, evaluate_fn=evaluator.evaluate, description="Tunable RAG Evaluator")
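
A hedged construction sketch mirroring the config defaults above (this is illustrative, not part of the package; `ChatOpenAI` and the prompt text are assumptions):

# Build the evaluator directly with the same defaults the config declares.
from langchain_openai import ChatOpenAI

from nat.eval.tunable_rag_evaluator.evaluate import TunableRagEvaluator

evaluator = TunableRagEvaluator(
    llm=ChatOpenAI(model="gpt-4o-mini"),
    judge_llm_prompt="Score the generated answer against the expected answer.",
    llm_retry_control_params=None,  # falls back to 3 attempts, 1 s initial backoff, jitter on
    max_concurrency=8,
    default_scoring=True,
    default_score_weights={"coverage": 0.5, "correctness": 0.3, "relevance": 0.2},
)
# evaluator.evaluate_item(item) can then be awaited per EvalInputItem.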

nat/eval/usage_stats.py
ADDED
@@ -0,0 +1,41 @@
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import typing

from pydantic import BaseModel


class UsageStatsLLM(BaseModel):
    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0


class UsageStatsItem(BaseModel):
    usage_stats_per_llm: dict[str, UsageStatsLLM]
    total_tokens: int | None = None
    runtime: float = 0.0
    min_timestamp: float = 0.0
    max_timestamp: float = 0.0
    llm_latency: float = 0.0


class UsageStats(BaseModel):
    # key is the id or input_obj from EvalInputItem
    min_timestamp: float = 0.0
    max_timestamp: float = 0.0
    total_runtime: float = 0.0
    usage_stats_items: dict[typing.Any, UsageStatsItem] = {}
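
An example of populating these models (the token counts and the "item-1" key are made up; the field names all come from the models above):

# Aggregate per-item token usage with the usage-stats models.
from nat.eval.usage_stats import UsageStats, UsageStatsItem, UsageStatsLLM

item = UsageStatsItem(
    usage_stats_per_llm={
        "judge_llm": UsageStatsLLM(prompt_tokens=812, completion_tokens=64, total_tokens=876),
    },
    total_tokens=876,
    runtime=2.4,
    min_timestamp=1.0,
    max_timestamp=3.4,
    llm_latency=1.1,
)
stats = UsageStats(min_timestamp=1.0,
                   max_timestamp=3.4,
                   total_runtime=2.4,
                   usage_stats_items={"item-1": item})  # keyed by EvalInputItem id or input_obj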

nat/eval/utils/__init__.py
File without changes

nat/eval/utils/eval_trace_ctx.py
ADDED
@@ -0,0 +1,89 @@
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from collections.abc import Callable
from contextlib import contextmanager
from typing import Any

logger = logging.getLogger(__name__)

# Type alias for evaluation call objects that have an optional 'id' attribute
EvalCallType = Any  # Could be Weave Call object or other tracing framework objects


class EvalTraceContext:
    """
    Evaluation trace context manager for coordinating traces.

    This class provides a framework-agnostic way to:
    1. Track evaluation calls/contexts
    2. Ensure proper parent-child relationships in traces
    """

    def __init__(self):
        self.eval_call: EvalCallType | None = None  # Store the evaluation call/context for propagation

    def set_eval_call(self, eval_call: EvalCallType | None) -> None:
        """Set the evaluation call/context for propagation to traces."""
        self.eval_call = eval_call
        if eval_call:
            logger.debug("Set evaluation call context: %s", getattr(eval_call, 'id', str(eval_call)))

    def get_eval_call(self) -> EvalCallType | None:
        """Get the current evaluation call/context."""
        return self.eval_call

    @contextmanager
    def evaluation_context(self):
        """
        Context manager that can be overridden by framework-specific implementations.
        Default implementation is a no-op.
        """
        yield


class WeaveEvalTraceContext(EvalTraceContext):
    """
    Weave-specific implementation of evaluation trace context.
    """

    def __init__(self):
        super().__init__()
        self.available = False
        self.set_call_stack: Callable[[list[EvalCallType]], Any] | None = None

        try:
            from weave.trace.context.call_context import set_call_stack
            self.set_call_stack = set_call_stack
            self.available = True
        except ImportError:
            self.available = False
            logger.debug("Weave not available for trace context")

    @contextmanager
    def evaluation_context(self):
        """Set the evaluation call as active context for Weave traces."""
        if self.available and self.eval_call and self.set_call_stack:
            try:
                with self.set_call_stack([self.eval_call]):
                    logger.debug("Set Weave evaluation call context: %s",
                                 getattr(self.eval_call, 'id', str(self.eval_call)))
                    yield
            except Exception as e:
                logger.warning("Failed to set Weave evaluation call context: %s", e)
                yield
        else:
            yield
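
A usage sketch for the trace context (illustrative, not part of the package): `parent_call` stands in for a Weave Call object the eval harness would have started, and `run_one_item` for executing a single workflow item.

from nat.eval.utils.eval_trace_ctx import WeaveEvalTraceContext


def evaluate_with_parent_trace(parent_call, run_one_item):
    trace_ctx = WeaveEvalTraceContext()
    trace_ctx.set_eval_call(parent_call)
    with trace_ctx.evaluation_context():
        # When Weave is installed, traces emitted here nest under parent_call;
        # otherwise evaluation_context() degrades to a no-op.
        run_one_item()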