nvidia-nat 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiq/__init__.py +66 -0
- nat/agent/__init__.py +0 -0
- nat/agent/base.py +256 -0
- nat/agent/dual_node.py +67 -0
- nat/agent/react_agent/__init__.py +0 -0
- nat/agent/react_agent/agent.py +363 -0
- nat/agent/react_agent/output_parser.py +104 -0
- nat/agent/react_agent/prompt.py +44 -0
- nat/agent/react_agent/register.py +149 -0
- nat/agent/reasoning_agent/__init__.py +0 -0
- nat/agent/reasoning_agent/reasoning_agent.py +225 -0
- nat/agent/register.py +23 -0
- nat/agent/rewoo_agent/__init__.py +0 -0
- nat/agent/rewoo_agent/agent.py +415 -0
- nat/agent/rewoo_agent/prompt.py +110 -0
- nat/agent/rewoo_agent/register.py +157 -0
- nat/agent/tool_calling_agent/__init__.py +0 -0
- nat/agent/tool_calling_agent/agent.py +119 -0
- nat/agent/tool_calling_agent/register.py +106 -0
- nat/authentication/__init__.py +14 -0
- nat/authentication/api_key/__init__.py +14 -0
- nat/authentication/api_key/api_key_auth_provider.py +96 -0
- nat/authentication/api_key/api_key_auth_provider_config.py +124 -0
- nat/authentication/api_key/register.py +26 -0
- nat/authentication/exceptions/__init__.py +14 -0
- nat/authentication/exceptions/api_key_exceptions.py +38 -0
- nat/authentication/http_basic_auth/__init__.py +0 -0
- nat/authentication/http_basic_auth/http_basic_auth_provider.py +81 -0
- nat/authentication/http_basic_auth/register.py +30 -0
- nat/authentication/interfaces.py +93 -0
- nat/authentication/oauth2/__init__.py +14 -0
- nat/authentication/oauth2/oauth2_auth_code_flow_provider.py +107 -0
- nat/authentication/oauth2/oauth2_auth_code_flow_provider_config.py +39 -0
- nat/authentication/oauth2/register.py +25 -0
- nat/authentication/register.py +21 -0
- nat/builder/__init__.py +0 -0
- nat/builder/builder.py +285 -0
- nat/builder/component_utils.py +316 -0
- nat/builder/context.py +270 -0
- nat/builder/embedder.py +24 -0
- nat/builder/eval_builder.py +161 -0
- nat/builder/evaluator.py +29 -0
- nat/builder/framework_enum.py +24 -0
- nat/builder/front_end.py +73 -0
- nat/builder/function.py +344 -0
- nat/builder/function_base.py +380 -0
- nat/builder/function_info.py +627 -0
- nat/builder/intermediate_step_manager.py +174 -0
- nat/builder/llm.py +25 -0
- nat/builder/retriever.py +25 -0
- nat/builder/user_interaction_manager.py +78 -0
- nat/builder/workflow.py +148 -0
- nat/builder/workflow_builder.py +1117 -0
- nat/cli/__init__.py +14 -0
- nat/cli/cli_utils/__init__.py +0 -0
- nat/cli/cli_utils/config_override.py +231 -0
- nat/cli/cli_utils/validation.py +37 -0
- nat/cli/commands/__init__.py +0 -0
- nat/cli/commands/configure/__init__.py +0 -0
- nat/cli/commands/configure/channel/__init__.py +0 -0
- nat/cli/commands/configure/channel/add.py +28 -0
- nat/cli/commands/configure/channel/channel.py +34 -0
- nat/cli/commands/configure/channel/remove.py +30 -0
- nat/cli/commands/configure/channel/update.py +30 -0
- nat/cli/commands/configure/configure.py +33 -0
- nat/cli/commands/evaluate.py +139 -0
- nat/cli/commands/info/__init__.py +14 -0
- nat/cli/commands/info/info.py +37 -0
- nat/cli/commands/info/list_channels.py +32 -0
- nat/cli/commands/info/list_components.py +129 -0
- nat/cli/commands/info/list_mcp.py +304 -0
- nat/cli/commands/registry/__init__.py +14 -0
- nat/cli/commands/registry/publish.py +88 -0
- nat/cli/commands/registry/pull.py +118 -0
- nat/cli/commands/registry/registry.py +36 -0
- nat/cli/commands/registry/remove.py +108 -0
- nat/cli/commands/registry/search.py +155 -0
- nat/cli/commands/sizing/__init__.py +14 -0
- nat/cli/commands/sizing/calc.py +297 -0
- nat/cli/commands/sizing/sizing.py +27 -0
- nat/cli/commands/start.py +246 -0
- nat/cli/commands/uninstall.py +81 -0
- nat/cli/commands/validate.py +47 -0
- nat/cli/commands/workflow/__init__.py +14 -0
- nat/cli/commands/workflow/templates/__init__.py.j2 +0 -0
- nat/cli/commands/workflow/templates/config.yml.j2 +16 -0
- nat/cli/commands/workflow/templates/pyproject.toml.j2 +22 -0
- nat/cli/commands/workflow/templates/register.py.j2 +5 -0
- nat/cli/commands/workflow/templates/workflow.py.j2 +36 -0
- nat/cli/commands/workflow/workflow.py +37 -0
- nat/cli/commands/workflow/workflow_commands.py +317 -0
- nat/cli/entrypoint.py +135 -0
- nat/cli/main.py +57 -0
- nat/cli/register_workflow.py +488 -0
- nat/cli/type_registry.py +1000 -0
- nat/data_models/__init__.py +14 -0
- nat/data_models/api_server.py +716 -0
- nat/data_models/authentication.py +231 -0
- nat/data_models/common.py +171 -0
- nat/data_models/component.py +58 -0
- nat/data_models/component_ref.py +168 -0
- nat/data_models/config.py +410 -0
- nat/data_models/dataset_handler.py +169 -0
- nat/data_models/discovery_metadata.py +305 -0
- nat/data_models/embedder.py +27 -0
- nat/data_models/evaluate.py +127 -0
- nat/data_models/evaluator.py +26 -0
- nat/data_models/front_end.py +26 -0
- nat/data_models/function.py +30 -0
- nat/data_models/function_dependencies.py +72 -0
- nat/data_models/interactive.py +246 -0
- nat/data_models/intermediate_step.py +302 -0
- nat/data_models/invocation_node.py +38 -0
- nat/data_models/llm.py +27 -0
- nat/data_models/logging.py +26 -0
- nat/data_models/memory.py +27 -0
- nat/data_models/object_store.py +44 -0
- nat/data_models/profiler.py +54 -0
- nat/data_models/registry_handler.py +26 -0
- nat/data_models/retriever.py +30 -0
- nat/data_models/retry_mixin.py +35 -0
- nat/data_models/span.py +190 -0
- nat/data_models/step_adaptor.py +64 -0
- nat/data_models/streaming.py +33 -0
- nat/data_models/swe_bench_model.py +54 -0
- nat/data_models/telemetry_exporter.py +26 -0
- nat/data_models/ttc_strategy.py +30 -0
- nat/embedder/__init__.py +0 -0
- nat/embedder/nim_embedder.py +59 -0
- nat/embedder/openai_embedder.py +43 -0
- nat/embedder/register.py +22 -0
- nat/eval/__init__.py +14 -0
- nat/eval/config.py +60 -0
- nat/eval/dataset_handler/__init__.py +0 -0
- nat/eval/dataset_handler/dataset_downloader.py +106 -0
- nat/eval/dataset_handler/dataset_filter.py +52 -0
- nat/eval/dataset_handler/dataset_handler.py +367 -0
- nat/eval/evaluate.py +510 -0
- nat/eval/evaluator/__init__.py +14 -0
- nat/eval/evaluator/base_evaluator.py +77 -0
- nat/eval/evaluator/evaluator_model.py +45 -0
- nat/eval/intermediate_step_adapter.py +99 -0
- nat/eval/rag_evaluator/__init__.py +0 -0
- nat/eval/rag_evaluator/evaluate.py +178 -0
- nat/eval/rag_evaluator/register.py +143 -0
- nat/eval/register.py +23 -0
- nat/eval/remote_workflow.py +133 -0
- nat/eval/runners/__init__.py +14 -0
- nat/eval/runners/config.py +39 -0
- nat/eval/runners/multi_eval_runner.py +54 -0
- nat/eval/runtime_event_subscriber.py +52 -0
- nat/eval/swe_bench_evaluator/__init__.py +0 -0
- nat/eval/swe_bench_evaluator/evaluate.py +215 -0
- nat/eval/swe_bench_evaluator/register.py +36 -0
- nat/eval/trajectory_evaluator/__init__.py +0 -0
- nat/eval/trajectory_evaluator/evaluate.py +75 -0
- nat/eval/trajectory_evaluator/register.py +40 -0
- nat/eval/tunable_rag_evaluator/__init__.py +0 -0
- nat/eval/tunable_rag_evaluator/evaluate.py +245 -0
- nat/eval/tunable_rag_evaluator/register.py +52 -0
- nat/eval/usage_stats.py +41 -0
- nat/eval/utils/__init__.py +0 -0
- nat/eval/utils/output_uploader.py +140 -0
- nat/eval/utils/tqdm_position_registry.py +40 -0
- nat/eval/utils/weave_eval.py +184 -0
- nat/experimental/__init__.py +0 -0
- nat/experimental/decorators/__init__.py +0 -0
- nat/experimental/decorators/experimental_warning_decorator.py +134 -0
- nat/experimental/test_time_compute/__init__.py +0 -0
- nat/experimental/test_time_compute/editing/__init__.py +0 -0
- nat/experimental/test_time_compute/editing/iterative_plan_refinement_editor.py +147 -0
- nat/experimental/test_time_compute/editing/llm_as_a_judge_editor.py +204 -0
- nat/experimental/test_time_compute/editing/motivation_aware_summarization.py +107 -0
- nat/experimental/test_time_compute/functions/__init__.py +0 -0
- nat/experimental/test_time_compute/functions/execute_score_select_function.py +105 -0
- nat/experimental/test_time_compute/functions/plan_select_execute_function.py +224 -0
- nat/experimental/test_time_compute/functions/ttc_tool_orchestration_function.py +205 -0
- nat/experimental/test_time_compute/functions/ttc_tool_wrapper_function.py +146 -0
- nat/experimental/test_time_compute/models/__init__.py +0 -0
- nat/experimental/test_time_compute/models/editor_config.py +132 -0
- nat/experimental/test_time_compute/models/scoring_config.py +112 -0
- nat/experimental/test_time_compute/models/search_config.py +120 -0
- nat/experimental/test_time_compute/models/selection_config.py +154 -0
- nat/experimental/test_time_compute/models/stage_enums.py +43 -0
- nat/experimental/test_time_compute/models/strategy_base.py +66 -0
- nat/experimental/test_time_compute/models/tool_use_config.py +41 -0
- nat/experimental/test_time_compute/models/ttc_item.py +48 -0
- nat/experimental/test_time_compute/register.py +36 -0
- nat/experimental/test_time_compute/scoring/__init__.py +0 -0
- nat/experimental/test_time_compute/scoring/llm_based_agent_scorer.py +168 -0
- nat/experimental/test_time_compute/scoring/llm_based_plan_scorer.py +168 -0
- nat/experimental/test_time_compute/scoring/motivation_aware_scorer.py +111 -0
- nat/experimental/test_time_compute/search/__init__.py +0 -0
- nat/experimental/test_time_compute/search/multi_llm_planner.py +128 -0
- nat/experimental/test_time_compute/search/multi_query_retrieval_search.py +122 -0
- nat/experimental/test_time_compute/search/single_shot_multi_plan_planner.py +128 -0
- nat/experimental/test_time_compute/selection/__init__.py +0 -0
- nat/experimental/test_time_compute/selection/best_of_n_selector.py +63 -0
- nat/experimental/test_time_compute/selection/llm_based_agent_output_selector.py +131 -0
- nat/experimental/test_time_compute/selection/llm_based_output_merging_selector.py +159 -0
- nat/experimental/test_time_compute/selection/llm_based_plan_selector.py +128 -0
- nat/experimental/test_time_compute/selection/threshold_selector.py +58 -0
- nat/front_ends/__init__.py +14 -0
- nat/front_ends/console/__init__.py +14 -0
- nat/front_ends/console/authentication_flow_handler.py +233 -0
- nat/front_ends/console/console_front_end_config.py +32 -0
- nat/front_ends/console/console_front_end_plugin.py +96 -0
- nat/front_ends/console/register.py +25 -0
- nat/front_ends/cron/__init__.py +14 -0
- nat/front_ends/fastapi/__init__.py +14 -0
- nat/front_ends/fastapi/auth_flow_handlers/__init__.py +0 -0
- nat/front_ends/fastapi/auth_flow_handlers/http_flow_handler.py +27 -0
- nat/front_ends/fastapi/auth_flow_handlers/websocket_flow_handler.py +107 -0
- nat/front_ends/fastapi/fastapi_front_end_config.py +241 -0
- nat/front_ends/fastapi/fastapi_front_end_controller.py +68 -0
- nat/front_ends/fastapi/fastapi_front_end_plugin.py +116 -0
- nat/front_ends/fastapi/fastapi_front_end_plugin_worker.py +1087 -0
- nat/front_ends/fastapi/html_snippets/__init__.py +14 -0
- nat/front_ends/fastapi/html_snippets/auth_code_grant_success.py +35 -0
- nat/front_ends/fastapi/intermediate_steps_subscriber.py +80 -0
- nat/front_ends/fastapi/job_store.py +183 -0
- nat/front_ends/fastapi/main.py +72 -0
- nat/front_ends/fastapi/message_handler.py +320 -0
- nat/front_ends/fastapi/message_validator.py +352 -0
- nat/front_ends/fastapi/register.py +25 -0
- nat/front_ends/fastapi/response_helpers.py +195 -0
- nat/front_ends/fastapi/step_adaptor.py +319 -0
- nat/front_ends/mcp/__init__.py +14 -0
- nat/front_ends/mcp/mcp_front_end_config.py +36 -0
- nat/front_ends/mcp/mcp_front_end_plugin.py +81 -0
- nat/front_ends/mcp/mcp_front_end_plugin_worker.py +143 -0
- nat/front_ends/mcp/register.py +27 -0
- nat/front_ends/mcp/tool_converter.py +241 -0
- nat/front_ends/register.py +22 -0
- nat/front_ends/simple_base/__init__.py +14 -0
- nat/front_ends/simple_base/simple_front_end_plugin_base.py +54 -0
- nat/llm/__init__.py +0 -0
- nat/llm/aws_bedrock_llm.py +57 -0
- nat/llm/nim_llm.py +46 -0
- nat/llm/openai_llm.py +46 -0
- nat/llm/register.py +23 -0
- nat/llm/utils/__init__.py +14 -0
- nat/llm/utils/env_config_value.py +94 -0
- nat/llm/utils/error.py +17 -0
- nat/memory/__init__.py +20 -0
- nat/memory/interfaces.py +183 -0
- nat/memory/models.py +112 -0
- nat/meta/pypi.md +58 -0
- nat/object_store/__init__.py +20 -0
- nat/object_store/in_memory_object_store.py +76 -0
- nat/object_store/interfaces.py +84 -0
- nat/object_store/models.py +38 -0
- nat/object_store/register.py +20 -0
- nat/observability/__init__.py +14 -0
- nat/observability/exporter/__init__.py +14 -0
- nat/observability/exporter/base_exporter.py +449 -0
- nat/observability/exporter/exporter.py +78 -0
- nat/observability/exporter/file_exporter.py +33 -0
- nat/observability/exporter/processing_exporter.py +322 -0
- nat/observability/exporter/raw_exporter.py +52 -0
- nat/observability/exporter/span_exporter.py +288 -0
- nat/observability/exporter_manager.py +335 -0
- nat/observability/mixin/__init__.py +14 -0
- nat/observability/mixin/batch_config_mixin.py +26 -0
- nat/observability/mixin/collector_config_mixin.py +23 -0
- nat/observability/mixin/file_mixin.py +288 -0
- nat/observability/mixin/file_mode.py +23 -0
- nat/observability/mixin/resource_conflict_mixin.py +134 -0
- nat/observability/mixin/serialize_mixin.py +61 -0
- nat/observability/mixin/type_introspection_mixin.py +183 -0
- nat/observability/processor/__init__.py +14 -0
- nat/observability/processor/batching_processor.py +310 -0
- nat/observability/processor/callback_processor.py +42 -0
- nat/observability/processor/intermediate_step_serializer.py +28 -0
- nat/observability/processor/processor.py +71 -0
- nat/observability/register.py +96 -0
- nat/observability/utils/__init__.py +14 -0
- nat/observability/utils/dict_utils.py +236 -0
- nat/observability/utils/time_utils.py +31 -0
- nat/plugins/.namespace +1 -0
- nat/profiler/__init__.py +0 -0
- nat/profiler/calc/__init__.py +14 -0
- nat/profiler/calc/calc_runner.py +627 -0
- nat/profiler/calc/calculations.py +288 -0
- nat/profiler/calc/data_models.py +188 -0
- nat/profiler/calc/plot.py +345 -0
- nat/profiler/callbacks/__init__.py +0 -0
- nat/profiler/callbacks/agno_callback_handler.py +295 -0
- nat/profiler/callbacks/base_callback_class.py +20 -0
- nat/profiler/callbacks/langchain_callback_handler.py +290 -0
- nat/profiler/callbacks/llama_index_callback_handler.py +205 -0
- nat/profiler/callbacks/semantic_kernel_callback_handler.py +238 -0
- nat/profiler/callbacks/token_usage_base_model.py +27 -0
- nat/profiler/data_frame_row.py +51 -0
- nat/profiler/data_models.py +24 -0
- nat/profiler/decorators/__init__.py +0 -0
- nat/profiler/decorators/framework_wrapper.py +131 -0
- nat/profiler/decorators/function_tracking.py +254 -0
- nat/profiler/forecasting/__init__.py +0 -0
- nat/profiler/forecasting/config.py +18 -0
- nat/profiler/forecasting/model_trainer.py +75 -0
- nat/profiler/forecasting/models/__init__.py +22 -0
- nat/profiler/forecasting/models/forecasting_base_model.py +40 -0
- nat/profiler/forecasting/models/linear_model.py +197 -0
- nat/profiler/forecasting/models/random_forest_regressor.py +269 -0
- nat/profiler/inference_metrics_model.py +28 -0
- nat/profiler/inference_optimization/__init__.py +0 -0
- nat/profiler/inference_optimization/bottleneck_analysis/__init__.py +0 -0
- nat/profiler/inference_optimization/bottleneck_analysis/nested_stack_analysis.py +460 -0
- nat/profiler/inference_optimization/bottleneck_analysis/simple_stack_analysis.py +258 -0
- nat/profiler/inference_optimization/data_models.py +386 -0
- nat/profiler/inference_optimization/experimental/__init__.py +0 -0
- nat/profiler/inference_optimization/experimental/concurrency_spike_analysis.py +468 -0
- nat/profiler/inference_optimization/experimental/prefix_span_analysis.py +405 -0
- nat/profiler/inference_optimization/llm_metrics.py +212 -0
- nat/profiler/inference_optimization/prompt_caching.py +163 -0
- nat/profiler/inference_optimization/token_uniqueness.py +107 -0
- nat/profiler/inference_optimization/workflow_runtimes.py +72 -0
- nat/profiler/intermediate_property_adapter.py +102 -0
- nat/profiler/profile_runner.py +473 -0
- nat/profiler/utils.py +184 -0
- nat/registry_handlers/__init__.py +0 -0
- nat/registry_handlers/local/__init__.py +0 -0
- nat/registry_handlers/local/local_handler.py +176 -0
- nat/registry_handlers/local/register_local.py +37 -0
- nat/registry_handlers/metadata_factory.py +60 -0
- nat/registry_handlers/package_utils.py +571 -0
- nat/registry_handlers/pypi/__init__.py +0 -0
- nat/registry_handlers/pypi/pypi_handler.py +251 -0
- nat/registry_handlers/pypi/register_pypi.py +40 -0
- nat/registry_handlers/register.py +21 -0
- nat/registry_handlers/registry_handler_base.py +157 -0
- nat/registry_handlers/rest/__init__.py +0 -0
- nat/registry_handlers/rest/register_rest.py +56 -0
- nat/registry_handlers/rest/rest_handler.py +237 -0
- nat/registry_handlers/schemas/__init__.py +0 -0
- nat/registry_handlers/schemas/headers.py +42 -0
- nat/registry_handlers/schemas/package.py +68 -0
- nat/registry_handlers/schemas/publish.py +68 -0
- nat/registry_handlers/schemas/pull.py +82 -0
- nat/registry_handlers/schemas/remove.py +36 -0
- nat/registry_handlers/schemas/search.py +91 -0
- nat/registry_handlers/schemas/status.py +47 -0
- nat/retriever/__init__.py +0 -0
- nat/retriever/interface.py +41 -0
- nat/retriever/milvus/__init__.py +14 -0
- nat/retriever/milvus/register.py +81 -0
- nat/retriever/milvus/retriever.py +228 -0
- nat/retriever/models.py +77 -0
- nat/retriever/nemo_retriever/__init__.py +14 -0
- nat/retriever/nemo_retriever/register.py +60 -0
- nat/retriever/nemo_retriever/retriever.py +190 -0
- nat/retriever/register.py +22 -0
- nat/runtime/__init__.py +14 -0
- nat/runtime/loader.py +220 -0
- nat/runtime/runner.py +195 -0
- nat/runtime/session.py +162 -0
- nat/runtime/user_metadata.py +130 -0
- nat/settings/__init__.py +0 -0
- nat/settings/global_settings.py +318 -0
- nat/test/.namespace +1 -0
- nat/tool/__init__.py +0 -0
- nat/tool/chat_completion.py +74 -0
- nat/tool/code_execution/README.md +151 -0
- nat/tool/code_execution/__init__.py +0 -0
- nat/tool/code_execution/code_sandbox.py +267 -0
- nat/tool/code_execution/local_sandbox/.gitignore +1 -0
- nat/tool/code_execution/local_sandbox/Dockerfile.sandbox +60 -0
- nat/tool/code_execution/local_sandbox/__init__.py +13 -0
- nat/tool/code_execution/local_sandbox/local_sandbox_server.py +198 -0
- nat/tool/code_execution/local_sandbox/sandbox.requirements.txt +6 -0
- nat/tool/code_execution/local_sandbox/start_local_sandbox.sh +50 -0
- nat/tool/code_execution/register.py +74 -0
- nat/tool/code_execution/test_code_execution_sandbox.py +414 -0
- nat/tool/code_execution/utils.py +100 -0
- nat/tool/datetime_tools.py +42 -0
- nat/tool/document_search.py +141 -0
- nat/tool/github_tools/__init__.py +0 -0
- nat/tool/github_tools/create_github_commit.py +133 -0
- nat/tool/github_tools/create_github_issue.py +87 -0
- nat/tool/github_tools/create_github_pr.py +106 -0
- nat/tool/github_tools/get_github_file.py +106 -0
- nat/tool/github_tools/get_github_issue.py +166 -0
- nat/tool/github_tools/get_github_pr.py +256 -0
- nat/tool/github_tools/update_github_issue.py +100 -0
- nat/tool/mcp/__init__.py +14 -0
- nat/tool/mcp/exceptions.py +142 -0
- nat/tool/mcp/mcp_client.py +255 -0
- nat/tool/mcp/mcp_tool.py +96 -0
- nat/tool/memory_tools/__init__.py +0 -0
- nat/tool/memory_tools/add_memory_tool.py +79 -0
- nat/tool/memory_tools/delete_memory_tool.py +67 -0
- nat/tool/memory_tools/get_memory_tool.py +72 -0
- nat/tool/nvidia_rag.py +95 -0
- nat/tool/register.py +38 -0
- nat/tool/retriever.py +94 -0
- nat/tool/server_tools.py +66 -0
- nat/utils/__init__.py +0 -0
- nat/utils/data_models/__init__.py +0 -0
- nat/utils/data_models/schema_validator.py +58 -0
- nat/utils/debugging_utils.py +43 -0
- nat/utils/dump_distro_mapping.py +32 -0
- nat/utils/exception_handlers/__init__.py +0 -0
- nat/utils/exception_handlers/automatic_retries.py +289 -0
- nat/utils/exception_handlers/mcp.py +211 -0
- nat/utils/exception_handlers/schemas.py +114 -0
- nat/utils/io/__init__.py +0 -0
- nat/utils/io/model_processing.py +28 -0
- nat/utils/io/yaml_tools.py +119 -0
- nat/utils/log_utils.py +37 -0
- nat/utils/metadata_utils.py +74 -0
- nat/utils/optional_imports.py +142 -0
- nat/utils/producer_consumer_queue.py +178 -0
- nat/utils/reactive/__init__.py +0 -0
- nat/utils/reactive/base/__init__.py +0 -0
- nat/utils/reactive/base/observable_base.py +65 -0
- nat/utils/reactive/base/observer_base.py +55 -0
- nat/utils/reactive/base/subject_base.py +79 -0
- nat/utils/reactive/observable.py +59 -0
- nat/utils/reactive/observer.py +76 -0
- nat/utils/reactive/subject.py +131 -0
- nat/utils/reactive/subscription.py +49 -0
- nat/utils/settings/__init__.py +0 -0
- nat/utils/settings/global_settings.py +197 -0
- nat/utils/string_utils.py +38 -0
- nat/utils/type_converter.py +290 -0
- nat/utils/type_utils.py +484 -0
- nat/utils/url_utils.py +27 -0
- nvidia_nat-1.2.0.dist-info/METADATA +365 -0
- nvidia_nat-1.2.0.dist-info/RECORD +435 -0
- nvidia_nat-1.2.0.dist-info/WHEEL +5 -0
- nvidia_nat-1.2.0.dist-info/entry_points.txt +21 -0
- nvidia_nat-1.2.0.dist-info/licenses/LICENSE-3rd-party.txt +5478 -0
- nvidia_nat-1.2.0.dist-info/licenses/LICENSE.md +201 -0
- nvidia_nat-1.2.0.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,184 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import asyncio
|
|
17
|
+
import logging
|
|
18
|
+
from typing import Any
|
|
19
|
+
|
|
20
|
+
from nat.eval.evaluator.evaluator_model import EvalInput
|
|
21
|
+
from nat.eval.evaluator.evaluator_model import EvalInputItem
|
|
22
|
+
from nat.eval.evaluator.evaluator_model import EvalOutput
|
|
23
|
+
from nat.eval.usage_stats import UsageStats
|
|
24
|
+
from nat.eval.usage_stats import UsageStatsItem
|
|
25
|
+
from nat.profiler.data_models import ProfilerResults
|
|
26
|
+
|
|
27
|
+
logger = logging.getLogger(__name__)  # module-level logger, stdlib convention
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class WeaveEvaluationIntegration:  # pylint: disable=too-many-public-methods
    """
    Class to handle all Weave integration functionality.

    Every public method degrades to a no-op when the optional ``weave``
    package is not installed or a Weave client has not been initialized,
    so callers never need to guard on Weave availability themselves.
    """

    def __init__(self):
        # True only when the optional `weave` package imports successfully.
        self.available = False
        self.client = None
        self.eval_logger = None
        # Maps EvalInputItem.id -> the per-prediction logger returned by log_prediction().
        self.pred_loggers = {}

        try:
            from weave.flow.eval_imperative import EvaluationLogger
            from weave.flow.eval_imperative import ScoreLogger
            from weave.trace.context import weave_client_context
            self.EvaluationLogger = EvaluationLogger
            self.ScoreLogger = ScoreLogger
            self.weave_client_context = weave_client_context
            self.available = True
        except ImportError:
            # we simply don't do anything if weave is not available
            self.available = False

    def initialize_client(self):
        """Initialize the Weave client if available.

        Returns:
            bool: True when a usable client was obtained, False otherwise.
        """
        if not self.available:
            return False

        try:
            self.client = self.weave_client_context.require_weave_client()
            return self.client is not None
        except Exception:
            # require_weave_client() raises when no Weave run was initialized;
            # treat that the same as "weave not available".
            self.client = None
            return False

    def _get_prediction_inputs(self, item: EvalInputItem):
        """Get the inputs for displaying in the UI.

        The following fields are excluded as they are too large to display in the UI:
        - full_dataset_entry
        - expected_trajectory
        - trajectory

        output_obj is excluded because it is displayed separately.
        """
        include = {"id", "input_obj", "expected_output_obj"}
        return item.model_dump(include=include)

    def _get_weave_dataset(self, eval_input: EvalInput):
        """Get the full dataset for Weave."""
        return [item.full_dataset_entry for item in eval_input.eval_input_items]

    def initialize_logger(self, workflow_alias: str, eval_input: EvalInput, config: Any):
        """Initialize the Weave evaluation logger.

        Returns:
            bool: True when the evaluation logger is ready to receive predictions.
        """
        if not self.client and not self.initialize_client():
            # lazy init the client
            return False

        try:
            weave_dataset = self._get_weave_dataset(eval_input)
            config_dict = config.model_dump(mode="json")
            config_dict["name"] = workflow_alias
            self.eval_logger = self.EvaluationLogger(model=config_dict, dataset=weave_dataset)
            self.pred_loggers = {}

            return True
        except Exception as e:
            self.eval_logger = None
            logger.warning("Failed to initialize Weave `EvaluationLogger`: %s", e)

            return False

    def log_prediction(self, item: EvalInputItem, output: Any):
        """Log a prediction to Weave."""
        if not self.eval_logger:
            return

        pred_logger = self.eval_logger.log_prediction(inputs=self._get_prediction_inputs(item), output=output)
        self.pred_loggers[item.id] = pred_logger

    async def log_usage_stats(self, item: EvalInputItem, usage_stats_item: UsageStatsItem):
        """Log usage stats to Weave."""
        if not self.eval_logger:
            return

        # Guard against items whose prediction was never logged (e.g. the
        # workflow failed before log_prediction ran); previously this raised
        # KeyError. Sibling alog_score() already guards membership the same way.
        pred_logger = self.pred_loggers.get(item.id)
        if pred_logger is None:
            return

        # log each usage stat as a score
        await pred_logger.alog_score(scorer="wf_runtime", score=usage_stats_item.runtime)

        # log the total tokens for this item, per-llm tokens can be exported later if needed
        await pred_logger.alog_score(scorer="wf_tokens", score=usage_stats_item.total_tokens)

    async def alog_score(self, eval_output: EvalOutput, evaluator_name: str):
        """Log scores for evaluation outputs."""
        if not self.eval_logger:
            return

        # Create coroutines for all score logging operations
        coros = []
        for eval_output_item in eval_output.eval_output_items:
            if eval_output_item.id in self.pred_loggers:
                coros.append(self.pred_loggers[eval_output_item.id].alog_score(
                    scorer=evaluator_name,
                    score=eval_output_item.score,
                ))

        # Execute all coroutines concurrently
        if coros:
            await asyncio.gather(*coros)

    async def afinish_loggers(self):
        """Finish all prediction loggers that have not been finished yet."""
        if not self.eval_logger:
            return

        async def _finish_one(pred_logger):
            # BUGFIX: the original guard was inverted -- it returned early for
            # loggers that had NOT finished (skipping their finish()) and called
            # finish() again on the ones that already had. Skip the finished
            # loggers instead; when the attribute is absent we conservatively
            # attempt to finish, matching the original's hasattr fallback.
            if getattr(pred_logger, '_has_finished', False):
                return
            # run the *blocking* finish() in a thread so we don't nest loops
            await asyncio.to_thread(pred_logger.finish)

        await asyncio.gather(*[_finish_one(pl) for pl in self.pred_loggers.values()])

    def _log_profiler_metrics(self, profiler_results: ProfilerResults, usage_stats: UsageStats) -> dict[str, Any]:
        """Collect profiler metrics destined for the Weave summary.

        Returns:
            dict[str, Any]: metric name -> value; only metrics that were
            actually computed by the profiler are included.
        """
        profile_metrics = {}
        if profiler_results.llm_latency_ci:
            profile_metrics["llm_latency_p95"] = profiler_results.llm_latency_ci.p95
        if profiler_results.workflow_runtime_metrics:
            profile_metrics["wf_runtime_p95"] = profiler_results.workflow_runtime_metrics.p95

        # TODO:get the LLM tokens from the usage stats and log them
        profile_metrics["total_runtime"] = usage_stats.total_runtime

        return profile_metrics

    def log_summary(self,
                    usage_stats: UsageStats,
                    evaluation_results: list[tuple[str, EvalOutput]],
                    profiler_results: ProfilerResults):
        """Log summary statistics to Weave."""
        if not self.eval_logger:
            return

        summary = {}
        # add evaluation results to the summary
        for evaluator_name, eval_output in evaluation_results:
            summary[evaluator_name] = eval_output.average_score

        # add profiler metrics to the summary
        profile_metrics = self._log_profiler_metrics(profiler_results, usage_stats)
        summary.update(profile_metrics)

        # Log the summary to finish the evaluation, disable auto-summarize
        # as we will be adding profiler metrics to the summary
        self.eval_logger.log_summary(summary, auto_summarize=False)
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import functools
|
|
17
|
+
import inspect
|
|
18
|
+
import logging
|
|
19
|
+
from typing import Any
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
BASE_WARNING_MESSAGE = ("is experimental and the API may change in future releases. "
|
|
24
|
+
"Future versions may introduce breaking changes without notice.")
|
|
25
|
+
|
|
26
|
+
_warning_issued = set()
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def issue_experimental_warning(function_name: str,
|
|
30
|
+
feature_name: str | None = None,
|
|
31
|
+
metadata: dict[str, Any] | None = None):
|
|
32
|
+
"""
|
|
33
|
+
Log a warning message that the function is experimental.
|
|
34
|
+
|
|
35
|
+
A warning is emitted only once per function. When a ``metadata`` dict
|
|
36
|
+
is supplied, it is appended to the log entry to provide extra context
|
|
37
|
+
(e.g., version, author, feature flag).
|
|
38
|
+
"""
|
|
39
|
+
if function_name not in _warning_issued:
|
|
40
|
+
if (feature_name):
|
|
41
|
+
warning_message = f"The {feature_name} feature {BASE_WARNING_MESSAGE}"
|
|
42
|
+
else:
|
|
43
|
+
warning_message = f"This function {BASE_WARNING_MESSAGE}"
|
|
44
|
+
|
|
45
|
+
warning_message += f" Function: {function_name}"
|
|
46
|
+
|
|
47
|
+
if (metadata):
|
|
48
|
+
warning_message += f" | Metadata: {metadata}"
|
|
49
|
+
|
|
50
|
+
# Issue warning and save function name to avoid duplicate warnings
|
|
51
|
+
logger.warning(warning_message)
|
|
52
|
+
|
|
53
|
+
_warning_issued.add(function_name)
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def experimental(func: Any = None, *, feature_name: str | None = None, metadata: dict[str, Any] | None = None):
    """
    Decorator that can wrap any kind of callable (sync, async, generator,
    async generator) and logs a one-time warning that it is experimental.

    Args:
        func: The function to be decorated.
        feature_name: Optional name of the feature that is experimental. If provided, the warning will be
                      prefixed with "The <feature_name> feature is experimental".
        metadata: Optional dictionary of metadata to log with the warning. This can include information
                  like version, author, etc. If provided, the metadata will be
                  logged alongside the experimental warning.
    """
    # Used with arguments, e.g. @experimental(feature_name=...): return a
    # decorator that re-enters this function once the target is known.
    if func is None:
        return functools.partial(experimental, feature_name=feature_name, metadata=metadata)

    function_name: str = f"{func.__module__}.{func.__qualname__}"

    # --- Validate metadata ---
    if metadata is not None:
        if not isinstance(metadata, dict):
            raise TypeError("metadata must be a dict[str, Any].")
        if not all(isinstance(key, str) for key in metadata):
            raise TypeError("All metadata keys must be strings.")

    # --- Dispatch on the callable kind and wrap accordingly ---
    # Async generators must be checked before coroutine functions: they are
    # not coroutine functions themselves but need a dedicated wrapper.
    if inspect.isasyncgenfunction(func):

        @functools.wraps(func)
        async def async_gen_wrapper(*args, **kwargs):
            issue_experimental_warning(function_name, feature_name, metadata)
            async for value in func(*args, **kwargs):
                yield value  # re-yield the original item unchanged

        return async_gen_wrapper

    if inspect.iscoroutinefunction(func):

        @functools.wraps(func)
        async def async_wrapper(*args, **kwargs):
            issue_experimental_warning(function_name, feature_name, metadata)
            return await func(*args, **kwargs)

        return async_wrapper

    if inspect.isgeneratorfunction(func):

        @functools.wraps(func)
        def sync_gen_wrapper(*args, **kwargs):
            issue_experimental_warning(function_name, feature_name, metadata)
            yield from func(*args, **kwargs)

        return sync_gen_wrapper

    # Plain synchronous callable.
    @functools.wraps(func)
    def sync_wrapper(*args, **kwargs):
        issue_experimental_warning(function_name, feature_name, metadata)
        return func(*args, **kwargs)

    return sync_wrapper
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
# Compatibility aliases with previous releases (the package was formerly
# published under the "aiq" namespace; keep the old name importable).
aiq_experimental = experimental
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import asyncio
|
|
17
|
+
import logging
|
|
18
|
+
import re
|
|
19
|
+
|
|
20
|
+
from nat.builder.builder import Builder
|
|
21
|
+
from nat.builder.framework_enum import LLMFrameworkEnum
|
|
22
|
+
from nat.cli.register_workflow import register_ttc_strategy
|
|
23
|
+
from nat.data_models.ttc_strategy import TTCStrategyBaseConfig
|
|
24
|
+
from nat.experimental.test_time_compute.models.editor_config import IterativePlanRefinementConfig
|
|
25
|
+
from nat.experimental.test_time_compute.models.stage_enums import PipelineTypeEnum
|
|
26
|
+
from nat.experimental.test_time_compute.models.stage_enums import StageTypeEnum
|
|
27
|
+
from nat.experimental.test_time_compute.models.strategy_base import StrategyBase
|
|
28
|
+
from nat.experimental.test_time_compute.models.ttc_item import TTCItem
|
|
29
|
+
from nat.utils.io.model_processing import remove_r1_think_tags
|
|
30
|
+
|
|
31
|
+
# Module-level logger, per the package-wide logging convention.
logger = logging.getLogger(__name__)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class IterativePlanRefinementEditor(StrategyBase):
    """
    A planner that generates an initial plan, then refines it multiple times
    using the same LLM. Each iteration updates the plan to (hopefully) be better.
    """

    def __init__(self, config: TTCStrategyBaseConfig) -> None:
        super().__init__(config)
        # Bound lazily in build_components(); holds the LangChain chat model.
        self.llm_bound = None

    def supported_pipeline_types(self) -> list[PipelineTypeEnum]:
        # FIX: the return annotation was the list literal `[PipelineTypeEnum]`,
        # which is not a valid type; use `list[PipelineTypeEnum]`.
        return [PipelineTypeEnum.PLANNING]

    def stage_type(self) -> StageTypeEnum:
        return StageTypeEnum.EDITING

    async def build_components(self, builder: Builder) -> None:
        """
        Build the components required for the iterative planner.
        """
        logger.debug("Building components for IterativePlanRefinementEditor")
        self.llm_bound = await builder.get_llm(self.config.editor_llm, wrapper_type=LLMFrameworkEnum.LANGCHAIN)

    async def refine_single(self, prompt: str, context: str, ttc_item: TTCItem, prompt_idx: int) -> TTCItem:
        """
        Refine a single planning item's plan over ``num_iterations`` passes.

        Args:
            prompt: The original prompt used to generate the plan.
            context: The agent context supplied to the refinement template.
            ttc_item: The planning item whose ``plan`` is refined in place.
            prompt_idx: 1-based index of the item, used only for logging.

        Returns:
            TTCItem: The same item with ``plan`` set to the final refined plan.

        Raises:
            ValueError: If the bound editor LLM is not a ``BaseChatModel``.
        """
        from langchain_core.language_models import BaseChatModel
        from langchain_core.prompts import PromptTemplate

        if not isinstance(self.llm_bound, BaseChatModel):
            raise ValueError("editor_llm must be a BaseChatModel instance for iterative plan refinement.")

        llm: BaseChatModel = self.llm_bound

        # Refinement loop
        refinement_template = PromptTemplate(
            template=self.config.refinement_template,
            input_variables=["current_plan", "context", "original_prompt"],
            validate_template=True,
        )

        current_plan = ttc_item.plan
        for iteration in range(1, self.config.num_iterations + 1):
            logger.info("Refinement iteration %d / %d for prompt %d", iteration, self.config.num_iterations, prompt_idx)
            refine_prompt = (await refinement_template.ainvoke({
                "current_plan": current_plan, "context": context, "original_prompt": prompt
            })).to_string()

            refine_response = await llm.ainvoke(refine_prompt)
            refined_plan = remove_r1_think_tags(
                refine_response.content if hasattr(refine_response, 'content') else str(refine_response))
            # The model is instructed to prefix its answer with "EDITED PLAN:"; strip it.
            refined_plan = re.sub(r'(?i)^\s*EDITED PLAN:\s*', '', refined_plan).strip()
            if refined_plan:
                current_plan = refined_plan
            else:
                # Keep the last non-empty plan rather than discarding progress.
                logger.warning("Refinement iteration %d for prompt %d produced an empty plan; keeping existing plan.",
                               iteration,
                               prompt_idx)

        # FIX: log message referred to "IterativePlanRefinementPlanner"; use the actual class name.
        logger.info("IterativePlanRefinementEditor produced a final plan after %d iterations.",
                    self.config.num_iterations)

        ttc_item.plan = current_plan
        # Return a single final plan
        return ttc_item

    async def ainvoke(self,
                      items: list[TTCItem],
                      original_prompt: str | None = None,
                      agent_context: str | None = None,
                      **kwargs) -> list[TTCItem]:
        """
        Runs the iterative plan refinement process on the provided planning items.

        Each planning item is refined in parallel the configured number of times. Default is 3.

        Args:
            items (list[TTCItem]): The planning items to refine.
            original_prompt (str): The original prompt used to generate the plans.
            agent_context (str): The context for the agent.

        Returns:
            list[TTCItem]: The refined planning items.

        Raises:
            ValueError: If ``original_prompt`` or ``agent_context`` is missing/empty.
        """

        if not original_prompt or not agent_context:
            # FIX: corrected typo "provdied" -> "provided" in the error message.
            raise ValueError("Arguments original_prompt and agent_context must be provided.")

        # Refine each planning item concurrently
        tasks = [
            self.refine_single(prompt=original_prompt, context=agent_context, ttc_item=item, prompt_idx=i + 1)
            for i, item in enumerate(items)
        ]

        # Run the tasks concurrently and gather results
        refined_planning_items = await asyncio.gather(*tasks)

        return refined_planning_items
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
@register_ttc_strategy(config_type=IterativePlanRefinementConfig)
async def register_iterative_plan_refinement_editor(config: IterativePlanRefinementConfig, builder: Builder):
    """
    Register the IterativePlanRefinementEditor strategy.

    Args:
        config (IterativePlanRefinementConfig): The configuration for the strategy.

    Returns:
        IterativePlanRefinementEditor: The registered strategy instance.
    """
    # Instantiate the strategy, resolve its LLM via the builder, then hand it
    # back to the registry for the lifetime of the workflow.
    strategy = IterativePlanRefinementEditor(config)
    await strategy.build_components(builder=builder)

    yield strategy
|
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import asyncio
|
|
17
|
+
import logging
|
|
18
|
+
import re
|
|
19
|
+
|
|
20
|
+
from nat.builder.builder import Builder
|
|
21
|
+
from nat.builder.framework_enum import LLMFrameworkEnum
|
|
22
|
+
from nat.cli.register_workflow import register_ttc_strategy
|
|
23
|
+
from nat.data_models.ttc_strategy import TTCStrategyBaseConfig
|
|
24
|
+
from nat.experimental.test_time_compute.models.editor_config import LLMAsAJudgeEditorConfig
|
|
25
|
+
from nat.experimental.test_time_compute.models.stage_enums import PipelineTypeEnum
|
|
26
|
+
from nat.experimental.test_time_compute.models.stage_enums import StageTypeEnum
|
|
27
|
+
from nat.experimental.test_time_compute.models.strategy_base import StrategyBase
|
|
28
|
+
from nat.experimental.test_time_compute.models.ttc_item import TTCItem
|
|
29
|
+
from nat.utils.io.model_processing import remove_r1_think_tags
|
|
30
|
+
|
|
31
|
+
# Module-level logger, per the package-wide logging convention.
logger = logging.getLogger(__name__)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class LLMAsAJudgeEditor(StrategyBase):
    """
    Given a list of PlanningItems, uses a feedback LLM to generate feedback on each plan
    Then edits the plan based on feedback.
    """

    def __init__(self, config: TTCStrategyBaseConfig) -> None:
        super().__init__(config)
        # Both LLM handles are bound lazily in build_components().
        self.feedback_llm = None
        self.editing_llm = None

    async def build_components(self, builder: Builder) -> None:
        """
        Build the components required for the editor.
        """
        # Get the feedback LLM
        self.feedback_llm = await builder.get_llm(self.config.feedback_llm, wrapper_type=LLMFrameworkEnum.LANGCHAIN)

        self.editing_llm = await builder.get_llm(self.config.editing_llm, wrapper_type=LLMFrameworkEnum.LANGCHAIN)

    def supported_pipeline_types(self) -> list[PipelineTypeEnum]:
        # FIX: the return annotation was the list literal `[PipelineTypeEnum]`,
        # which is not a valid type; use `list[PipelineTypeEnum]`.
        return [PipelineTypeEnum.PLANNING]

    def stage_type(self) -> StageTypeEnum:
        return StageTypeEnum.EDITING

    async def generate_feedback(self, llm, template, context: str, prompt: str, item: TTCItem) -> TTCItem:
        """
        Helper function to generate feedback for a given planning item using the provided prompt.

        Args:
            llm: The feedback chat model.
            template: PromptTemplate expecting context/original_prompt/plan/num_feedback.
            context: The agent context string.
            prompt: The original prompt used to generate the plans.
            item: The planning item to critique; its ``feedback`` field is set on success.

        Returns:
            TTCItem: The same item, with ``feedback`` populated when the LLM produced any.
        """
        # FIX: avoid shadowing the `prompt` parameter with the rendered template.
        rendered = await template.ainvoke(
            input={
                "context": context,
                "original_prompt": prompt,  # Original prompt used to generate the plans
                "plan": item.plan,
                "num_feedback": self.config.num_feedback
            })

        feedback_result = await llm.ainvoke(rendered.to_string())
        if not feedback_result:
            # FIX: use lazy %-style logging args instead of f-strings (project convention).
            logger.warning("No feedback generated for plan: %s.", item.plan)
            return item

        # Update the planning item with the generated feedback
        cleaned = remove_r1_think_tags(
            feedback_result.content if hasattr(feedback_result, 'content') else str(feedback_result))

        # Feedback is the string following 'FEEDBACK:'. Use Regex to extract
        cleaned = re.sub(r'(?i)^\s*FEEDBACK:\s*', '', cleaned).strip()
        if not cleaned:
            logger.warning("Feedback was empty for plan: %s.", item.plan)
            return item

        item.feedback = cleaned  # Set the feedback in the TTCItem

        return item

    async def edit_plan(self, llm, template, context: str, prompt: str, item: TTCItem) -> TTCItem:
        """
        Helper function to edit a plan based on feedback using the provided prompt.

        Args:
            llm: The editing chat model.
            template: PromptTemplate expecting context/original_prompt/plan/feedback.
            context: The agent context string.
            prompt: The original prompt used to generate the plans.
            item: The planning item to edit; its ``plan`` is replaced on success.

        Returns:
            TTCItem: The same item, with ``plan`` updated when editing produced output.
        """

        if not item.feedback:
            logger.warning("No feedback available for plan: %s. Cannot edit.", item.plan)
            return item

        rendered = await template.ainvoke(
            input={
                "context": context,
                "original_prompt": prompt,  # Original prompt used to generate the plans
                "plan": item.plan,
                "feedback": item.feedback
            })

        editing_result = await llm.ainvoke(rendered.to_string())
        if not editing_result:
            logger.warning("No editing result generated for plan: %s.", item.plan)
            return item

        # Update the planning item with the edited plan
        cleaned = remove_r1_think_tags(
            editing_result.content if hasattr(editing_result, 'content') else str(editing_result))

        # Plan is the string following 'EDITED PLAN:'. Use Regex to extract
        cleaned = re.sub(r'(?i)^\s*EDITED PLAN:\s*', '', cleaned).strip()
        if not cleaned:
            logger.warning("Edited plan was empty for plan: %s. Returning original.", item.plan)
            return item

        # Update the plan in the PlanningItem
        item.plan = cleaned

        return item

    async def ainvoke(self,
                      items: list[TTCItem],
                      original_prompt: str | None = None,
                      agent_context: str | None = None,
                      **kwargs) -> list[TTCItem]:
        """
        Edit the provided planning items using a feedback LLM.

        First generates feedback for every plan concurrently, then edits every
        plan concurrently based on that feedback.

        Args:
            items (list[TTCItem]): The planning items to edit.
            original_prompt (str): The original prompt used to generate the plans.
            agent_context (str): The context for the agent.

        Returns:
            list[TTCItem]: The edited planning items.

        Raises:
            ValueError: If either LLM is not a ``BaseChatModel`` or no results were produced.
        """
        from langchain_core.language_models import BaseChatModel
        from langchain_core.prompts import PromptTemplate

        # assert self.config.feedback_llm is a BaseChatModel
        if not isinstance(self.feedback_llm, BaseChatModel):
            raise ValueError("The `feedback_llm` must be an instance of `BaseChatModel`.")

        # assert self.config.editing_llm is a BaseChatModel
        if not isinstance(self.editing_llm, BaseChatModel):
            raise ValueError("The `editing_llm` must be an instance of `BaseChatModel`.")

        feedback_model: BaseChatModel = self.feedback_llm
        editing_model: BaseChatModel = self.editing_llm

        feedback_template = PromptTemplate(template=self.config.feedback_template,
                                           input_variables=["context", "original_prompt", "plan", "num_feedback"],
                                           validate_template=True)

        editing_template = PromptTemplate(template=self.config.editor_template,
                                          input_variables=["context", "original_prompt", "plan", "feedback"],
                                          validate_template=True)

        # Generate feedback for each planning item concurrently
        feedback_tasks = [
            self.generate_feedback(
                llm=feedback_model,
                template=feedback_template,
                context=agent_context,
                prompt=original_prompt,  # Original prompt used to generate the plans
                item=item) for item in items
        ]
        # Run the feedback tasks concurrently and gather results
        planning_items_with_feedback = await asyncio.gather(*feedback_tasks)

        if not planning_items_with_feedback:
            raise ValueError("No feedback was generated for the planning items. Please check the LLM response.")

        logger.info("Generated feedback for %d plans.", len(planning_items_with_feedback))

        # Now edit each planning item based on the feedback concurrently
        editing_tasks = [
            self.edit_plan(
                llm=editing_model,
                template=editing_template,
                context=agent_context,
                prompt=original_prompt,  # Original prompt used to generate the plans
                item=item) for item in planning_items_with_feedback
        ]
        # Run the editing tasks concurrently and gather results
        edited_planning_items = await asyncio.gather(*editing_tasks)

        if not edited_planning_items:
            raise ValueError("No plans were edited. Please check the LLM response.")

        logger.info("Edited %d plans based on feedback.", len(edited_planning_items))
        return edited_planning_items
|
|
193
|
+
|
|
194
|
+
|
|
195
|
+
@register_ttc_strategy(config_type=LLMAsAJudgeEditorConfig)
async def register_llm_as_a_judge_editor(config: LLMAsAJudgeEditorConfig, builder: Builder):
    """
    Register the LLMAsAJudgeEditor strategy with the provided configuration and builder.

    Args:
        config (LLMAsAJudgeEditorConfig): The configuration for the strategy.
        builder (Builder): Builder used to resolve the feedback and editing LLMs.

    Yields:
        LLMAsAJudgeEditor: The registered strategy instance.
    """
    # FIX: the parameter was annotated with the base `TTCStrategyBaseConfig`
    # while the decorator registers `LLMAsAJudgeEditorConfig`; annotate with
    # the concrete config type for consistency with the sibling registrations.
    editor = LLMAsAJudgeEditor(config)
    await editor.build_components(builder)

    yield editor
|