nvidia_nat-1.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (435)
  1. aiq/__init__.py +66 -0
  2. nat/agent/__init__.py +0 -0
  3. nat/agent/base.py +256 -0
  4. nat/agent/dual_node.py +67 -0
  5. nat/agent/react_agent/__init__.py +0 -0
  6. nat/agent/react_agent/agent.py +363 -0
  7. nat/agent/react_agent/output_parser.py +104 -0
  8. nat/agent/react_agent/prompt.py +44 -0
  9. nat/agent/react_agent/register.py +149 -0
  10. nat/agent/reasoning_agent/__init__.py +0 -0
  11. nat/agent/reasoning_agent/reasoning_agent.py +225 -0
  12. nat/agent/register.py +23 -0
  13. nat/agent/rewoo_agent/__init__.py +0 -0
  14. nat/agent/rewoo_agent/agent.py +415 -0
  15. nat/agent/rewoo_agent/prompt.py +110 -0
  16. nat/agent/rewoo_agent/register.py +157 -0
  17. nat/agent/tool_calling_agent/__init__.py +0 -0
  18. nat/agent/tool_calling_agent/agent.py +119 -0
  19. nat/agent/tool_calling_agent/register.py +106 -0
  20. nat/authentication/__init__.py +14 -0
  21. nat/authentication/api_key/__init__.py +14 -0
  22. nat/authentication/api_key/api_key_auth_provider.py +96 -0
  23. nat/authentication/api_key/api_key_auth_provider_config.py +124 -0
  24. nat/authentication/api_key/register.py +26 -0
  25. nat/authentication/exceptions/__init__.py +14 -0
  26. nat/authentication/exceptions/api_key_exceptions.py +38 -0
  27. nat/authentication/http_basic_auth/__init__.py +0 -0
  28. nat/authentication/http_basic_auth/http_basic_auth_provider.py +81 -0
  29. nat/authentication/http_basic_auth/register.py +30 -0
  30. nat/authentication/interfaces.py +93 -0
  31. nat/authentication/oauth2/__init__.py +14 -0
  32. nat/authentication/oauth2/oauth2_auth_code_flow_provider.py +107 -0
  33. nat/authentication/oauth2/oauth2_auth_code_flow_provider_config.py +39 -0
  34. nat/authentication/oauth2/register.py +25 -0
  35. nat/authentication/register.py +21 -0
  36. nat/builder/__init__.py +0 -0
  37. nat/builder/builder.py +285 -0
  38. nat/builder/component_utils.py +316 -0
  39. nat/builder/context.py +270 -0
  40. nat/builder/embedder.py +24 -0
  41. nat/builder/eval_builder.py +161 -0
  42. nat/builder/evaluator.py +29 -0
  43. nat/builder/framework_enum.py +24 -0
  44. nat/builder/front_end.py +73 -0
  45. nat/builder/function.py +344 -0
  46. nat/builder/function_base.py +380 -0
  47. nat/builder/function_info.py +627 -0
  48. nat/builder/intermediate_step_manager.py +174 -0
  49. nat/builder/llm.py +25 -0
  50. nat/builder/retriever.py +25 -0
  51. nat/builder/user_interaction_manager.py +78 -0
  52. nat/builder/workflow.py +148 -0
  53. nat/builder/workflow_builder.py +1117 -0
  54. nat/cli/__init__.py +14 -0
  55. nat/cli/cli_utils/__init__.py +0 -0
  56. nat/cli/cli_utils/config_override.py +231 -0
  57. nat/cli/cli_utils/validation.py +37 -0
  58. nat/cli/commands/__init__.py +0 -0
  59. nat/cli/commands/configure/__init__.py +0 -0
  60. nat/cli/commands/configure/channel/__init__.py +0 -0
  61. nat/cli/commands/configure/channel/add.py +28 -0
  62. nat/cli/commands/configure/channel/channel.py +34 -0
  63. nat/cli/commands/configure/channel/remove.py +30 -0
  64. nat/cli/commands/configure/channel/update.py +30 -0
  65. nat/cli/commands/configure/configure.py +33 -0
  66. nat/cli/commands/evaluate.py +139 -0
  67. nat/cli/commands/info/__init__.py +14 -0
  68. nat/cli/commands/info/info.py +37 -0
  69. nat/cli/commands/info/list_channels.py +32 -0
  70. nat/cli/commands/info/list_components.py +129 -0
  71. nat/cli/commands/info/list_mcp.py +304 -0
  72. nat/cli/commands/registry/__init__.py +14 -0
  73. nat/cli/commands/registry/publish.py +88 -0
  74. nat/cli/commands/registry/pull.py +118 -0
  75. nat/cli/commands/registry/registry.py +36 -0
  76. nat/cli/commands/registry/remove.py +108 -0
  77. nat/cli/commands/registry/search.py +155 -0
  78. nat/cli/commands/sizing/__init__.py +14 -0
  79. nat/cli/commands/sizing/calc.py +297 -0
  80. nat/cli/commands/sizing/sizing.py +27 -0
  81. nat/cli/commands/start.py +246 -0
  82. nat/cli/commands/uninstall.py +81 -0
  83. nat/cli/commands/validate.py +47 -0
  84. nat/cli/commands/workflow/__init__.py +14 -0
  85. nat/cli/commands/workflow/templates/__init__.py.j2 +0 -0
  86. nat/cli/commands/workflow/templates/config.yml.j2 +16 -0
  87. nat/cli/commands/workflow/templates/pyproject.toml.j2 +22 -0
  88. nat/cli/commands/workflow/templates/register.py.j2 +5 -0
  89. nat/cli/commands/workflow/templates/workflow.py.j2 +36 -0
  90. nat/cli/commands/workflow/workflow.py +37 -0
  91. nat/cli/commands/workflow/workflow_commands.py +317 -0
  92. nat/cli/entrypoint.py +135 -0
  93. nat/cli/main.py +57 -0
  94. nat/cli/register_workflow.py +488 -0
  95. nat/cli/type_registry.py +1000 -0
  96. nat/data_models/__init__.py +14 -0
  97. nat/data_models/api_server.py +716 -0
  98. nat/data_models/authentication.py +231 -0
  99. nat/data_models/common.py +171 -0
  100. nat/data_models/component.py +58 -0
  101. nat/data_models/component_ref.py +168 -0
  102. nat/data_models/config.py +410 -0
  103. nat/data_models/dataset_handler.py +169 -0
  104. nat/data_models/discovery_metadata.py +305 -0
  105. nat/data_models/embedder.py +27 -0
  106. nat/data_models/evaluate.py +127 -0
  107. nat/data_models/evaluator.py +26 -0
  108. nat/data_models/front_end.py +26 -0
  109. nat/data_models/function.py +30 -0
  110. nat/data_models/function_dependencies.py +72 -0
  111. nat/data_models/interactive.py +246 -0
  112. nat/data_models/intermediate_step.py +302 -0
  113. nat/data_models/invocation_node.py +38 -0
  114. nat/data_models/llm.py +27 -0
  115. nat/data_models/logging.py +26 -0
  116. nat/data_models/memory.py +27 -0
  117. nat/data_models/object_store.py +44 -0
  118. nat/data_models/profiler.py +54 -0
  119. nat/data_models/registry_handler.py +26 -0
  120. nat/data_models/retriever.py +30 -0
  121. nat/data_models/retry_mixin.py +35 -0
  122. nat/data_models/span.py +190 -0
  123. nat/data_models/step_adaptor.py +64 -0
  124. nat/data_models/streaming.py +33 -0
  125. nat/data_models/swe_bench_model.py +54 -0
  126. nat/data_models/telemetry_exporter.py +26 -0
  127. nat/data_models/ttc_strategy.py +30 -0
  128. nat/embedder/__init__.py +0 -0
  129. nat/embedder/nim_embedder.py +59 -0
  130. nat/embedder/openai_embedder.py +43 -0
  131. nat/embedder/register.py +22 -0
  132. nat/eval/__init__.py +14 -0
  133. nat/eval/config.py +60 -0
  134. nat/eval/dataset_handler/__init__.py +0 -0
  135. nat/eval/dataset_handler/dataset_downloader.py +106 -0
  136. nat/eval/dataset_handler/dataset_filter.py +52 -0
  137. nat/eval/dataset_handler/dataset_handler.py +367 -0
  138. nat/eval/evaluate.py +510 -0
  139. nat/eval/evaluator/__init__.py +14 -0
  140. nat/eval/evaluator/base_evaluator.py +77 -0
  141. nat/eval/evaluator/evaluator_model.py +45 -0
  142. nat/eval/intermediate_step_adapter.py +99 -0
  143. nat/eval/rag_evaluator/__init__.py +0 -0
  144. nat/eval/rag_evaluator/evaluate.py +178 -0
  145. nat/eval/rag_evaluator/register.py +143 -0
  146. nat/eval/register.py +23 -0
  147. nat/eval/remote_workflow.py +133 -0
  148. nat/eval/runners/__init__.py +14 -0
  149. nat/eval/runners/config.py +39 -0
  150. nat/eval/runners/multi_eval_runner.py +54 -0
  151. nat/eval/runtime_event_subscriber.py +52 -0
  152. nat/eval/swe_bench_evaluator/__init__.py +0 -0
  153. nat/eval/swe_bench_evaluator/evaluate.py +215 -0
  154. nat/eval/swe_bench_evaluator/register.py +36 -0
  155. nat/eval/trajectory_evaluator/__init__.py +0 -0
  156. nat/eval/trajectory_evaluator/evaluate.py +75 -0
  157. nat/eval/trajectory_evaluator/register.py +40 -0
  158. nat/eval/tunable_rag_evaluator/__init__.py +0 -0
  159. nat/eval/tunable_rag_evaluator/evaluate.py +245 -0
  160. nat/eval/tunable_rag_evaluator/register.py +52 -0
  161. nat/eval/usage_stats.py +41 -0
  162. nat/eval/utils/__init__.py +0 -0
  163. nat/eval/utils/output_uploader.py +140 -0
  164. nat/eval/utils/tqdm_position_registry.py +40 -0
  165. nat/eval/utils/weave_eval.py +184 -0
  166. nat/experimental/__init__.py +0 -0
  167. nat/experimental/decorators/__init__.py +0 -0
  168. nat/experimental/decorators/experimental_warning_decorator.py +134 -0
  169. nat/experimental/test_time_compute/__init__.py +0 -0
  170. nat/experimental/test_time_compute/editing/__init__.py +0 -0
  171. nat/experimental/test_time_compute/editing/iterative_plan_refinement_editor.py +147 -0
  172. nat/experimental/test_time_compute/editing/llm_as_a_judge_editor.py +204 -0
  173. nat/experimental/test_time_compute/editing/motivation_aware_summarization.py +107 -0
  174. nat/experimental/test_time_compute/functions/__init__.py +0 -0
  175. nat/experimental/test_time_compute/functions/execute_score_select_function.py +105 -0
  176. nat/experimental/test_time_compute/functions/plan_select_execute_function.py +224 -0
  177. nat/experimental/test_time_compute/functions/ttc_tool_orchestration_function.py +205 -0
  178. nat/experimental/test_time_compute/functions/ttc_tool_wrapper_function.py +146 -0
  179. nat/experimental/test_time_compute/models/__init__.py +0 -0
  180. nat/experimental/test_time_compute/models/editor_config.py +132 -0
  181. nat/experimental/test_time_compute/models/scoring_config.py +112 -0
  182. nat/experimental/test_time_compute/models/search_config.py +120 -0
  183. nat/experimental/test_time_compute/models/selection_config.py +154 -0
  184. nat/experimental/test_time_compute/models/stage_enums.py +43 -0
  185. nat/experimental/test_time_compute/models/strategy_base.py +66 -0
  186. nat/experimental/test_time_compute/models/tool_use_config.py +41 -0
  187. nat/experimental/test_time_compute/models/ttc_item.py +48 -0
  188. nat/experimental/test_time_compute/register.py +36 -0
  189. nat/experimental/test_time_compute/scoring/__init__.py +0 -0
  190. nat/experimental/test_time_compute/scoring/llm_based_agent_scorer.py +168 -0
  191. nat/experimental/test_time_compute/scoring/llm_based_plan_scorer.py +168 -0
  192. nat/experimental/test_time_compute/scoring/motivation_aware_scorer.py +111 -0
  193. nat/experimental/test_time_compute/search/__init__.py +0 -0
  194. nat/experimental/test_time_compute/search/multi_llm_planner.py +128 -0
  195. nat/experimental/test_time_compute/search/multi_query_retrieval_search.py +122 -0
  196. nat/experimental/test_time_compute/search/single_shot_multi_plan_planner.py +128 -0
  197. nat/experimental/test_time_compute/selection/__init__.py +0 -0
  198. nat/experimental/test_time_compute/selection/best_of_n_selector.py +63 -0
  199. nat/experimental/test_time_compute/selection/llm_based_agent_output_selector.py +131 -0
  200. nat/experimental/test_time_compute/selection/llm_based_output_merging_selector.py +159 -0
  201. nat/experimental/test_time_compute/selection/llm_based_plan_selector.py +128 -0
  202. nat/experimental/test_time_compute/selection/threshold_selector.py +58 -0
  203. nat/front_ends/__init__.py +14 -0
  204. nat/front_ends/console/__init__.py +14 -0
  205. nat/front_ends/console/authentication_flow_handler.py +233 -0
  206. nat/front_ends/console/console_front_end_config.py +32 -0
  207. nat/front_ends/console/console_front_end_plugin.py +96 -0
  208. nat/front_ends/console/register.py +25 -0
  209. nat/front_ends/cron/__init__.py +14 -0
  210. nat/front_ends/fastapi/__init__.py +14 -0
  211. nat/front_ends/fastapi/auth_flow_handlers/__init__.py +0 -0
  212. nat/front_ends/fastapi/auth_flow_handlers/http_flow_handler.py +27 -0
  213. nat/front_ends/fastapi/auth_flow_handlers/websocket_flow_handler.py +107 -0
  214. nat/front_ends/fastapi/fastapi_front_end_config.py +241 -0
  215. nat/front_ends/fastapi/fastapi_front_end_controller.py +68 -0
  216. nat/front_ends/fastapi/fastapi_front_end_plugin.py +116 -0
  217. nat/front_ends/fastapi/fastapi_front_end_plugin_worker.py +1087 -0
  218. nat/front_ends/fastapi/html_snippets/__init__.py +14 -0
  219. nat/front_ends/fastapi/html_snippets/auth_code_grant_success.py +35 -0
  220. nat/front_ends/fastapi/intermediate_steps_subscriber.py +80 -0
  221. nat/front_ends/fastapi/job_store.py +183 -0
  222. nat/front_ends/fastapi/main.py +72 -0
  223. nat/front_ends/fastapi/message_handler.py +320 -0
  224. nat/front_ends/fastapi/message_validator.py +352 -0
  225. nat/front_ends/fastapi/register.py +25 -0
  226. nat/front_ends/fastapi/response_helpers.py +195 -0
  227. nat/front_ends/fastapi/step_adaptor.py +319 -0
  228. nat/front_ends/mcp/__init__.py +14 -0
  229. nat/front_ends/mcp/mcp_front_end_config.py +36 -0
  230. nat/front_ends/mcp/mcp_front_end_plugin.py +81 -0
  231. nat/front_ends/mcp/mcp_front_end_plugin_worker.py +143 -0
  232. nat/front_ends/mcp/register.py +27 -0
  233. nat/front_ends/mcp/tool_converter.py +241 -0
  234. nat/front_ends/register.py +22 -0
  235. nat/front_ends/simple_base/__init__.py +14 -0
  236. nat/front_ends/simple_base/simple_front_end_plugin_base.py +54 -0
  237. nat/llm/__init__.py +0 -0
  238. nat/llm/aws_bedrock_llm.py +57 -0
  239. nat/llm/nim_llm.py +46 -0
  240. nat/llm/openai_llm.py +46 -0
  241. nat/llm/register.py +23 -0
  242. nat/llm/utils/__init__.py +14 -0
  243. nat/llm/utils/env_config_value.py +94 -0
  244. nat/llm/utils/error.py +17 -0
  245. nat/memory/__init__.py +20 -0
  246. nat/memory/interfaces.py +183 -0
  247. nat/memory/models.py +112 -0
  248. nat/meta/pypi.md +58 -0
  249. nat/object_store/__init__.py +20 -0
  250. nat/object_store/in_memory_object_store.py +76 -0
  251. nat/object_store/interfaces.py +84 -0
  252. nat/object_store/models.py +38 -0
  253. nat/object_store/register.py +20 -0
  254. nat/observability/__init__.py +14 -0
  255. nat/observability/exporter/__init__.py +14 -0
  256. nat/observability/exporter/base_exporter.py +449 -0
  257. nat/observability/exporter/exporter.py +78 -0
  258. nat/observability/exporter/file_exporter.py +33 -0
  259. nat/observability/exporter/processing_exporter.py +322 -0
  260. nat/observability/exporter/raw_exporter.py +52 -0
  261. nat/observability/exporter/span_exporter.py +288 -0
  262. nat/observability/exporter_manager.py +335 -0
  263. nat/observability/mixin/__init__.py +14 -0
  264. nat/observability/mixin/batch_config_mixin.py +26 -0
  265. nat/observability/mixin/collector_config_mixin.py +23 -0
  266. nat/observability/mixin/file_mixin.py +288 -0
  267. nat/observability/mixin/file_mode.py +23 -0
  268. nat/observability/mixin/resource_conflict_mixin.py +134 -0
  269. nat/observability/mixin/serialize_mixin.py +61 -0
  270. nat/observability/mixin/type_introspection_mixin.py +183 -0
  271. nat/observability/processor/__init__.py +14 -0
  272. nat/observability/processor/batching_processor.py +310 -0
  273. nat/observability/processor/callback_processor.py +42 -0
  274. nat/observability/processor/intermediate_step_serializer.py +28 -0
  275. nat/observability/processor/processor.py +71 -0
  276. nat/observability/register.py +96 -0
  277. nat/observability/utils/__init__.py +14 -0
  278. nat/observability/utils/dict_utils.py +236 -0
  279. nat/observability/utils/time_utils.py +31 -0
  280. nat/plugins/.namespace +1 -0
  281. nat/profiler/__init__.py +0 -0
  282. nat/profiler/calc/__init__.py +14 -0
  283. nat/profiler/calc/calc_runner.py +627 -0
  284. nat/profiler/calc/calculations.py +288 -0
  285. nat/profiler/calc/data_models.py +188 -0
  286. nat/profiler/calc/plot.py +345 -0
  287. nat/profiler/callbacks/__init__.py +0 -0
  288. nat/profiler/callbacks/agno_callback_handler.py +295 -0
  289. nat/profiler/callbacks/base_callback_class.py +20 -0
  290. nat/profiler/callbacks/langchain_callback_handler.py +290 -0
  291. nat/profiler/callbacks/llama_index_callback_handler.py +205 -0
  292. nat/profiler/callbacks/semantic_kernel_callback_handler.py +238 -0
  293. nat/profiler/callbacks/token_usage_base_model.py +27 -0
  294. nat/profiler/data_frame_row.py +51 -0
  295. nat/profiler/data_models.py +24 -0
  296. nat/profiler/decorators/__init__.py +0 -0
  297. nat/profiler/decorators/framework_wrapper.py +131 -0
  298. nat/profiler/decorators/function_tracking.py +254 -0
  299. nat/profiler/forecasting/__init__.py +0 -0
  300. nat/profiler/forecasting/config.py +18 -0
  301. nat/profiler/forecasting/model_trainer.py +75 -0
  302. nat/profiler/forecasting/models/__init__.py +22 -0
  303. nat/profiler/forecasting/models/forecasting_base_model.py +40 -0
  304. nat/profiler/forecasting/models/linear_model.py +197 -0
  305. nat/profiler/forecasting/models/random_forest_regressor.py +269 -0
  306. nat/profiler/inference_metrics_model.py +28 -0
  307. nat/profiler/inference_optimization/__init__.py +0 -0
  308. nat/profiler/inference_optimization/bottleneck_analysis/__init__.py +0 -0
  309. nat/profiler/inference_optimization/bottleneck_analysis/nested_stack_analysis.py +460 -0
  310. nat/profiler/inference_optimization/bottleneck_analysis/simple_stack_analysis.py +258 -0
  311. nat/profiler/inference_optimization/data_models.py +386 -0
  312. nat/profiler/inference_optimization/experimental/__init__.py +0 -0
  313. nat/profiler/inference_optimization/experimental/concurrency_spike_analysis.py +468 -0
  314. nat/profiler/inference_optimization/experimental/prefix_span_analysis.py +405 -0
  315. nat/profiler/inference_optimization/llm_metrics.py +212 -0
  316. nat/profiler/inference_optimization/prompt_caching.py +163 -0
  317. nat/profiler/inference_optimization/token_uniqueness.py +107 -0
  318. nat/profiler/inference_optimization/workflow_runtimes.py +72 -0
  319. nat/profiler/intermediate_property_adapter.py +102 -0
  320. nat/profiler/profile_runner.py +473 -0
  321. nat/profiler/utils.py +184 -0
  322. nat/registry_handlers/__init__.py +0 -0
  323. nat/registry_handlers/local/__init__.py +0 -0
  324. nat/registry_handlers/local/local_handler.py +176 -0
  325. nat/registry_handlers/local/register_local.py +37 -0
  326. nat/registry_handlers/metadata_factory.py +60 -0
  327. nat/registry_handlers/package_utils.py +571 -0
  328. nat/registry_handlers/pypi/__init__.py +0 -0
  329. nat/registry_handlers/pypi/pypi_handler.py +251 -0
  330. nat/registry_handlers/pypi/register_pypi.py +40 -0
  331. nat/registry_handlers/register.py +21 -0
  332. nat/registry_handlers/registry_handler_base.py +157 -0
  333. nat/registry_handlers/rest/__init__.py +0 -0
  334. nat/registry_handlers/rest/register_rest.py +56 -0
  335. nat/registry_handlers/rest/rest_handler.py +237 -0
  336. nat/registry_handlers/schemas/__init__.py +0 -0
  337. nat/registry_handlers/schemas/headers.py +42 -0
  338. nat/registry_handlers/schemas/package.py +68 -0
  339. nat/registry_handlers/schemas/publish.py +68 -0
  340. nat/registry_handlers/schemas/pull.py +82 -0
  341. nat/registry_handlers/schemas/remove.py +36 -0
  342. nat/registry_handlers/schemas/search.py +91 -0
  343. nat/registry_handlers/schemas/status.py +47 -0
  344. nat/retriever/__init__.py +0 -0
  345. nat/retriever/interface.py +41 -0
  346. nat/retriever/milvus/__init__.py +14 -0
  347. nat/retriever/milvus/register.py +81 -0
  348. nat/retriever/milvus/retriever.py +228 -0
  349. nat/retriever/models.py +77 -0
  350. nat/retriever/nemo_retriever/__init__.py +14 -0
  351. nat/retriever/nemo_retriever/register.py +60 -0
  352. nat/retriever/nemo_retriever/retriever.py +190 -0
  353. nat/retriever/register.py +22 -0
  354. nat/runtime/__init__.py +14 -0
  355. nat/runtime/loader.py +220 -0
  356. nat/runtime/runner.py +195 -0
  357. nat/runtime/session.py +162 -0
  358. nat/runtime/user_metadata.py +130 -0
  359. nat/settings/__init__.py +0 -0
  360. nat/settings/global_settings.py +318 -0
  361. nat/test/.namespace +1 -0
  362. nat/tool/__init__.py +0 -0
  363. nat/tool/chat_completion.py +74 -0
  364. nat/tool/code_execution/README.md +151 -0
  365. nat/tool/code_execution/__init__.py +0 -0
  366. nat/tool/code_execution/code_sandbox.py +267 -0
  367. nat/tool/code_execution/local_sandbox/.gitignore +1 -0
  368. nat/tool/code_execution/local_sandbox/Dockerfile.sandbox +60 -0
  369. nat/tool/code_execution/local_sandbox/__init__.py +13 -0
  370. nat/tool/code_execution/local_sandbox/local_sandbox_server.py +198 -0
  371. nat/tool/code_execution/local_sandbox/sandbox.requirements.txt +6 -0
  372. nat/tool/code_execution/local_sandbox/start_local_sandbox.sh +50 -0
  373. nat/tool/code_execution/register.py +74 -0
  374. nat/tool/code_execution/test_code_execution_sandbox.py +414 -0
  375. nat/tool/code_execution/utils.py +100 -0
  376. nat/tool/datetime_tools.py +42 -0
  377. nat/tool/document_search.py +141 -0
  378. nat/tool/github_tools/__init__.py +0 -0
  379. nat/tool/github_tools/create_github_commit.py +133 -0
  380. nat/tool/github_tools/create_github_issue.py +87 -0
  381. nat/tool/github_tools/create_github_pr.py +106 -0
  382. nat/tool/github_tools/get_github_file.py +106 -0
  383. nat/tool/github_tools/get_github_issue.py +166 -0
  384. nat/tool/github_tools/get_github_pr.py +256 -0
  385. nat/tool/github_tools/update_github_issue.py +100 -0
  386. nat/tool/mcp/__init__.py +14 -0
  387. nat/tool/mcp/exceptions.py +142 -0
  388. nat/tool/mcp/mcp_client.py +255 -0
  389. nat/tool/mcp/mcp_tool.py +96 -0
  390. nat/tool/memory_tools/__init__.py +0 -0
  391. nat/tool/memory_tools/add_memory_tool.py +79 -0
  392. nat/tool/memory_tools/delete_memory_tool.py +67 -0
  393. nat/tool/memory_tools/get_memory_tool.py +72 -0
  394. nat/tool/nvidia_rag.py +95 -0
  395. nat/tool/register.py +38 -0
  396. nat/tool/retriever.py +94 -0
  397. nat/tool/server_tools.py +66 -0
  398. nat/utils/__init__.py +0 -0
  399. nat/utils/data_models/__init__.py +0 -0
  400. nat/utils/data_models/schema_validator.py +58 -0
  401. nat/utils/debugging_utils.py +43 -0
  402. nat/utils/dump_distro_mapping.py +32 -0
  403. nat/utils/exception_handlers/__init__.py +0 -0
  404. nat/utils/exception_handlers/automatic_retries.py +289 -0
  405. nat/utils/exception_handlers/mcp.py +211 -0
  406. nat/utils/exception_handlers/schemas.py +114 -0
  407. nat/utils/io/__init__.py +0 -0
  408. nat/utils/io/model_processing.py +28 -0
  409. nat/utils/io/yaml_tools.py +119 -0
  410. nat/utils/log_utils.py +37 -0
  411. nat/utils/metadata_utils.py +74 -0
  412. nat/utils/optional_imports.py +142 -0
  413. nat/utils/producer_consumer_queue.py +178 -0
  414. nat/utils/reactive/__init__.py +0 -0
  415. nat/utils/reactive/base/__init__.py +0 -0
  416. nat/utils/reactive/base/observable_base.py +65 -0
  417. nat/utils/reactive/base/observer_base.py +55 -0
  418. nat/utils/reactive/base/subject_base.py +79 -0
  419. nat/utils/reactive/observable.py +59 -0
  420. nat/utils/reactive/observer.py +76 -0
  421. nat/utils/reactive/subject.py +131 -0
  422. nat/utils/reactive/subscription.py +49 -0
  423. nat/utils/settings/__init__.py +0 -0
  424. nat/utils/settings/global_settings.py +197 -0
  425. nat/utils/string_utils.py +38 -0
  426. nat/utils/type_converter.py +290 -0
  427. nat/utils/type_utils.py +484 -0
  428. nat/utils/url_utils.py +27 -0
  429. nvidia_nat-1.2.0.dist-info/METADATA +365 -0
  430. nvidia_nat-1.2.0.dist-info/RECORD +435 -0
  431. nvidia_nat-1.2.0.dist-info/WHEEL +5 -0
  432. nvidia_nat-1.2.0.dist-info/entry_points.txt +21 -0
  433. nvidia_nat-1.2.0.dist-info/licenses/LICENSE-3rd-party.txt +5478 -0
  434. nvidia_nat-1.2.0.dist-info/licenses/LICENSE.md +201 -0
  435. nvidia_nat-1.2.0.dist-info/top_level.txt +2 -0
nat/experimental/test_time_compute/functions/ttc_tool_wrapper_function.py
@@ -0,0 +1,146 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import logging
+
+ from pydantic import BaseModel
+ from pydantic import Field
+
+ from nat.builder.builder import Builder
+ from nat.builder.framework_enum import LLMFrameworkEnum
+ from nat.builder.function import Function
+ from nat.builder.function_info import FunctionInfo
+ from nat.cli.register_workflow import register_function
+ from nat.data_models.component_ref import FunctionRef
+ from nat.data_models.component_ref import LLMRef
+ from nat.data_models.function import FunctionBaseConfig
+ from nat.utils.string_utils import convert_to_str
+
+ logger = logging.getLogger(__name__)
+
+
+ class TTCToolWrapperFunctionConfig(FunctionBaseConfig, name="ttc_tool_wrapper"):
+     """
+     Configuration for the TTCToolWrapperFunction, which is used to wrap a function that will be executed
+     in the inference time scaling pipeline.
+
+     This function is responsible for turning an 'objective' or description for the tool into tool input.
+
+     NOTE: Only supports LLMs with structured output.
+     """
+
+     augmented_fn: FunctionRef = Field(description="The name of the function to reason on.")
+
+     input_llm: LLMRef = Field(description="The LLM that will generate input to the function.")
+     verbose: bool = Field(default=False, description="Whether to log detailed information.")
+
+     downstream_template: str = Field(
+         description="The template for the input LLM to generate structured input to the function.",
+         default=("You are a highly sophisticated generalist AI assistant. Your objective is to act as a"
+                  " conduit between a user's task for a function and the function itself. You will be given a general "
+                  "description of the task, or pseudo input for a function. You will also be provided with a description "
+                  "of the function, its input schema, and the output schema. Your task is to generate structured input "
+                  "to the function based on the description of the task and the function's input schema. If you do not "
+                  "have enough information to generate structured input, you should respond with 'NOT ENOUGH "
+                  "INFORMATION'. \n\n The description of the function is: {function_description}\n\n"
+                  "The input schema of the function is: {input_schema}\n\n"
+                  "The output schema of the function is: {output_schema}\n\n"
+                  "The description of the task is: {task_description}\n\n"
+                  "The structured input to the function is: "))
+
+     tool_description: str | None = Field(description="The description of the tool to be used for the function.",
+                                          default=None)
+
+
+ @register_function(config_type=TTCToolWrapperFunctionConfig, framework_wrappers=[LLMFrameworkEnum.LANGCHAIN])
+ async def register_ttc_tool_wrapper_function(
+     config: TTCToolWrapperFunctionConfig,
+     builder: Builder,
+ ):
+     """
+     Register the TTCToolWrapperFunction with the provided builder and configuration.
+     """
+
+     try:
+         from langchain_core.language_models import BaseChatModel
+         from langchain_core.prompts import PromptTemplate
+     except ImportError:
+         raise ImportError("langchain-core is not installed. Please install it to use the TTC tool wrapper function.\n"
+                           "This error can be resolved by installing nvidia-nat-langchain.")
+
+     augmented_function: Function = builder.get_function(config.augmented_fn)
+     input_llm: BaseChatModel = await builder.get_llm(config.input_llm, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+
+     if not augmented_function.has_single_output:
+         raise ValueError("TTCToolWrapperFunction only supports functions with a single output.")
+
+     if augmented_function.description and augmented_function.description != "":
+         augmented_function_desc = augmented_function.description
+     else:
+         if not config.tool_description:
+             raise ValueError(f"Function {config.augmented_fn} does not have a description. Cannot augment "
+                              "function without a description and without a tool description.")
+
+         augmented_function_desc = config.tool_description
+
+     fn_input_schema: BaseModel = augmented_function.input_schema
+     fn_output_schema: BaseModel = augmented_function.single_output_schema
+
+     runnable_llm = input_llm.with_structured_output(schema=fn_input_schema)
+
+     template = PromptTemplate(
+         template=config.downstream_template,
+         input_variables=["function_description", "input_schema", "output_schema", "task_description"],
+         validate_template=True)
+
+     function_description = (f"\nDescription: {augmented_function_desc}\n" +
+                             "\n Input should be a thorough description with all relevant information on what "
+                             "the tool should do. The tool requires information about "
+                             f"{fn_input_schema.model_fields}")
+
+     async def single_inner(input_message: str) -> fn_output_schema:
+         """
+         Inner function that handles the single output of the TTCToolWrapperFunction.
+         It generates structured input for the augmented function based on the input message and then invokes it.
+         """
+
+         prompt = await template.ainvoke(
+             input={
+                 "function_description": augmented_function_desc,
+                 "input_schema": fn_input_schema,
+                 "output_schema": fn_output_schema,
+                 "task_description": input_message
+             })
+
+         prompt = prompt.to_string()
+
+         if config.verbose:
+             logger.info("TTCToolWrapperFunction: Generated prompt: %s", prompt)
+
+         llm_parsed = await runnable_llm.ainvoke(prompt)
+
+         if not llm_parsed:
+             logger.warning("TTCToolWrapperFunction: LLM parsing error")
+             return "Not enough information"
+
+         # Call the augmented function with the structured input
+         result = await augmented_function.acall_invoke(llm_parsed)
+
+         return result
+
+     yield FunctionInfo.from_fn(fn=single_inner, description=function_description, converters=[convert_to_str])
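For orientation, a minimal sketch (not taken from the package) of constructing the config class defined above directly in Python. The import path follows the file listing; the component names are hypothetical placeholders, and it assumes the FunctionRef/LLMRef fields accept plain strings as references.

from nat.experimental.test_time_compute.functions.ttc_tool_wrapper_function import TTCToolWrapperFunctionConfig

# Hypothetical references: these names would normally point at a function and an LLM
# registered elsewhere in the workflow configuration.
config = TTCToolWrapperFunctionConfig(
    augmented_fn="my_wrapped_function",
    input_llm="my_structured_output_llm",
    verbose=True,
)
print(config.downstream_template[:60])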
nat/experimental/test_time_compute/models/editor_config.py
@@ -0,0 +1,132 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import typing
+
+ from pydantic import Field
+ from pydantic import model_validator
+
+ from nat.data_models.component_ref import LLMRef
+ from nat.data_models.ttc_strategy import TTCStrategyBaseConfig
+
+
+ class LLMAsAJudgeEditorConfig(TTCStrategyBaseConfig, name="llm_as_a_judge_editor"):
+     """
+     Configuration for the LLMAsAJudgeEditor.
+     """
+     num_feedback: int = Field(default=10,
+                               description="Number of feedback items to generate for each plan during editing. "
+                               "This can help in refining the plans based on feedback.")
+
+     # If this strategy is used, an editing LLM must be provided.
+     editing_llm: LLMRef | typing.Any | None = Field(
+         default=None,
+         description="The LLM to use for editing the plans. This can be a callable or an instance of an LLM client.")
+
+     # If the editing strategy is LLM_AS_A_JUDGE, ensure that the feedback_llm is provided.
+     feedback_llm: LLMRef | typing.Any | None = Field(default=None,
+                                                      description="The LLM to use for generating feedback on the plans."
+                                                      " This can be a callable or an instance of an LLM client.")
+
+     editor_template: str = Field(default=(
+         "You are an expert at improving execution plans. You will be given a plan and feedback on that plan."
+         " Your task is to create an improved version of the plan that addresses the feedback "
+         "while maintaining its strengths.\n\n"
+         "Here is the context:\n\n"
+         "{context}\n\n"
+         "**Input:** \n{original_prompt}\n\n"
+         "**Original Plan:**\n{plan}\n\n"
+         "**Feedback on the Plan:**\n{feedback}\n\n"
+         "Please provide an improved version of the plan that addresses"
+         " the feedback points. Maintain the same structure and "
+         "step-by-step format, but enhance the content. Do not include explanations of your changes, just provide the "
+         "improved plan directly:\n\n"
+         "Begin the final improved plan with 'EDITED PLAN:'"),
+         description="The template to use for editing the planning items based on feedback.")
+
+     feedback_template: str = Field(
+         default=("You are an expert at evaluating execution plans. You will be given a plan and "
+                  "need to provide {num_feedback} "
+                  "specific points of feedback about its strengths and weaknesses.\n\n"
+                  "Your feedback should cover aspects like:\n"
+                  "- Comprehensiveness of the plan\n"
+                  "- Logical flow and sequencing\n"
+                  "- Appropriate use of available tools\n"
+                  "- Potential edge cases or failure points\n"
+                  "- Efficiency and optimization opportunities\n\n"
+                  "Here is the context and plan to evaluate:\n\n"
+                  "{context}\n\n"
+                  "**Objective:** \n{original_prompt}\n\n"
+                  "**Plan to Evaluate:**\n{plan}\n\n"
+                  "Please provide exactly {num_feedback} numbered points of feedback, including "
+                  "both strengths and areas for improvement. Begin the feedback with 'FEEDBACK:' and provide "
+                  "{num_feedback} specific feedback points."),
+         description="The template to use for generating feedback for each planning item.")
+
+     @model_validator(mode="before")
+     def validate_strategies(cls, values: dict[str, typing.Any]) -> dict[str, typing.Any]:
+
+         if values.get('editing_llm') is None:
+             raise ValueError('editing_llm must be provided when editing_strategy is set.')
+         # If the editing strategy is LLM_AS_A_JUDGE, feedback_llm must also be provided.
+         if values.get('feedback_llm') is None:
+             raise ValueError('feedback_llm must be provided when editing_strategy is LLM_AS_A_JUDGE.')
+
+         return values
+
+
+ class IterativePlanRefinementConfig(TTCStrategyBaseConfig, name="iterative_plan_refinement"):
+     """Configuration for an 'iterative plan refinement' strategy."""
+     editor_llm: LLMRef | typing.Any | None = Field(
+         default=None, description="The LLM to use for generating and refining the plan across multiple iterations.")
+     num_iterations: int = Field(default=3, description="How many refinement steps to perform.")
+     refinement_template: str = Field(
+         default=("You have the current plan:\n{current_plan}\n\n"
+                  "The plan was generated to achieve the following objective:\n{original_prompt}\n\n"
+                  "Using an agent system with the following description:\n{context}\n\n"
+                  "Refine or improve it to achieve the objective better. "
+                  "Output the updated plan, beginning with:\nEDITED PLAN:\n"),
+         description="Prompt used in each iteration to refine the plan.")
+
+     @model_validator(mode="before")
+     def validate_iterative_strategies(cls, values: dict) -> dict:
+         if not values.get('editor_llm'):
+             raise ValueError('editor_llm must be provided for iterative plan refinement.')
+         if values.get('num_iterations', 0) < 1:
+             raise ValueError('num_iterations must be >= 1 for iterative plan refinement.')
+         return values
+
+
+ class MotivationAwareSummarizationConfig(TTCStrategyBaseConfig, name="motivation_aware_editing"):
+     """
+     Configuration for the MotivationAwareSummarization strategy.
+     """
+     editor_llm: LLMRef | typing.Any | None = Field(
+         default=None,
+         description="The LLM to use for editing the plans. This can be a callable or an instance of an LLM client.")
+
+     editor_template: str = Field(
+         default=("You are an expert at summarizing key information from relevant documents based on an input task "
+                  "and motivation. Given a task, a motivation, and documents, your task is to create a concise "
+                  "summarized response to the task and motivation grounded in the documents.\n\n"
+                  "Here is the task:\n\n"
+                  "{task}\n\n"
+                  "Here is the motivation:\n\n"
+                  "{motivation}\n\n"
+                  "and here are the documents:\n\n"
+                  "{output}\n\n"
+                  "Please respond with a concise summary that addresses the task and motivation, in at most one "
+                  "or two sentences. Do not include any other output except the summary."),
+         description="The template to use for summarizing documents.")
nat/experimental/test_time_compute/models/scoring_config.py
@@ -0,0 +1,112 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import typing
+
+ from pydantic import Field
+ from pydantic import model_validator
+
+ from nat.data_models.component_ref import LLMRef
+ from nat.data_models.ttc_strategy import TTCStrategyBaseConfig
+
+
+ class LLMBasedPlanScoringConfig(TTCStrategyBaseConfig, name="llm_based_plan_scoring"):
+     """
+     Configuration for LLM-based plan scoring.
+     """
+     scoring_llm: LLMRef | typing.Any | None = Field(
+         default=None,
+         description="The LLM to use for scoring the plans. This can be a callable or an instance of an LLM client.")
+
+     scoring_template: str = Field(
+         default=("You are an expert reasoning model tasked with scoring the following execution plan based on its "
+                  "quality and relevance to the provided input to an agent system.\n\n"
+                  "The agent system's role is:\n{context}\n\n"
+                  "It has been tasked with achieving the following goal: \n{original_prompt}\n\n"
+                  "The following plan has been generated to achieve this goal:\n\n{plan}\n\n"
+                  "Score the plan on a scale from 1 to 10, where 10 is the best. "
+                  "Return the final score as a floating point number preceded by `FINAL SCORE:` without any "
+                  "other text before or after it\n"),
+         description="The template to use for scoring the plans.")
+
+     @model_validator(mode="before")
+     def validate_strategies(cls, values: dict[str, typing.Any]) -> dict[str, typing.Any]:
+         """
+         Ensure that the scoring_llm is provided when using LLM-based scoring.
+         """
+         if values.get('scoring_llm') is None:
+             raise ValueError('scoring_llm must be provided when scorer_type is set to LLM_BASED_SCORING.')
+
+         return values
+
+
+ class LLMBasedAgentScoringConfig(TTCStrategyBaseConfig, name="llm_based_agent_scoring"):
+     """
+     Configuration for LLM-based agent output scoring.
+     """
+     scoring_llm: LLMRef | typing.Any | None = Field(
+         default=None,
+         description="The LLM to use for scoring the plans. This can be a callable or an instance of an LLM client.")
+
+     scoring_template: str = Field(
+         description="Prompt template to use for scoring the function output",
+         default=("You are an expert reasoning model tasked with scoring the following "
+                  "result of an agent system based on its input and objective. Judge"
+                  " the quality and relevance of the answer to score it.\n\n"
+                  "The agent system's objective is:\n{objective}\n\n"
+                  "It has been tasked with achieving the following goal: \n{input}\n\n"
+                  "The following output has been generated by the agent:\n\n{output}\n\n"
+                  "Score the result on a scale from 1 to 10, where 10 is the best. "
+                  "Return the final score as a floating point number preceded by `FINAL SCORE:` without any "
+                  "other text before or after it\n"),
+     )
+
+     @model_validator(mode="before")
+     def validate_strategies(cls, values: dict[str, typing.Any]) -> dict[str, typing.Any]:
+         """
+         Ensure that the scoring_llm is provided when using LLM-based scoring.
+         """
+         if values.get('scoring_llm') is None:
+             raise ValueError('scoring_llm must be provided when scorer_type is set to LLM_BASED_SCORING.')
+
+         return values
+
+
+ class MotivationAwareScoringConfig(TTCStrategyBaseConfig, name="motivation_aware_scoring"):
+     """
+     Configuration for a scoring strategy that considers both the original input (task)
+     and the motivation (from metadata) along with the current output.
+     """
+
+     scoring_llm: LLMRef | None = Field(
+         default=None, description="The LLM used to evaluate how well the output addresses the task plus motivation.")
+
+     scoring_template: str = Field(
+         default=("You are an expert at assessing the quality of an output in relation to its task and motivation.\n"
+                  "Task: {task}\n"
+                  "Motivation: {motivation}\n"
+                  "Output: {output}\n"
+                  "On a scale from 1 to 10 (10 being the best), how well does this output fulfill "
+                  "the original task in the context "
+                  "of the provided motivation? Note that the output might answer only one part of a bigger question, "
+                  "which should count as a satisfactory response and should not receive a lower score.\n"
+                  "Return the final score as a floating point number preceded by 'FINAL SCORE:'."),
+         description="The prompt template used to evaluate and score the output.")
+
+     @model_validator(mode="before")
+     def validate_scoring_llm(cls, values):
+         if values.get('scoring_llm') is None:
+             raise ValueError("A scoring_llm must be provided for motivation_aware_scoring.")
+         return values
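All of the scoring templates above instruct the model to end its answer with a FINAL SCORE: marker. As an illustration only (the actual parsing lives in the scorer implementations, which are not shown in this diff), a score could be pulled out of such a response like this:

import re

def extract_final_score(response: str) -> float | None:
    """Illustrative parser for the 'FINAL SCORE: <float>' convention used by the templates above."""
    match = re.search(r"FINAL SCORE:\s*([0-9]+(?:\.[0-9]+)?)", response)
    return float(match.group(1)) if match else None

print(extract_final_score("The plan is solid.\nFINAL SCORE: 8.5"))  # 8.5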
nat/experimental/test_time_compute/models/search_config.py
@@ -0,0 +1,120 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import typing
+
+ from pydantic import Field
+ from pydantic import model_validator
+
+ from nat.data_models.component_ref import LLMRef
+ from nat.data_models.ttc_strategy import TTCStrategyBaseConfig
+
+
+ class SingleShotMultiPlanConfig(TTCStrategyBaseConfig, name="single_shot_multi_plan"):
+     num_plans: int = Field(default=4, description="Number of plans to generate.")
+     max_temperature: float = Field(default=1.0,
+                                    description="Maximum temperature to use for sampling when generating plans. "
+                                    "This can help control the randomness of the generated plans.")
+     min_temperature: float = Field(default=0.5,
+                                    description="Minimum temperature to use for sampling when generating plans. "
+                                    "This can help control the randomness of the generated plans.")
+     # If this strategy is used, a planning LLM must be provided.
+     planning_llm: LLMRef | typing.Any | None = Field(
+         default=None,
+         description="The LLM to use for planning. This can be a callable or an "
+         "instance of an LLM client.")
+
+     planning_template: str = Field(
+         default=("You are an expert reasoning model tasked with creating a detailed execution plan"
+                  " for a system that has the following information to get the result of a given input:\n\n"
+                  "**System Information:**\n {context}\n\n"
+                  "**Input:** \n{prompt}\n\n"
+                  "An example plan could look like this:\n\n"
+                  "1. Call tool A with input X\n"
+                  "2. Call tool B with input Y\n"
+                  "3. Interpret the output of tool A and B\n"
+                  "4. Return the final result"
+                  "\n\nBegin the final plan with PLAN:\n"),
+         description="The template to use for generating plans.")
+
+     @model_validator(mode="before")
+     def validate_strategies(cls, values: dict[str, typing.Any]) -> dict[str, typing.Any]:
+         """
+         Ensure that the required LLMs are provided based on the selected strategies.
+         """
+         # Validate the planning strategy: planning_llm must be provided if planning_strategy is set.
+         if values.get('planning_llm') is None:
+             raise ValueError('planning_llm must be provided when planning_strategy is set.')
+
+         return values
+
+
+ class MultiLLMPlanConfig(TTCStrategyBaseConfig, name="multi_llm_plan"):
+     """Configuration for a 'multi LLM plan generation' strategy."""
+     llms: list[LLMRef] = Field(
+         default_factory=list,
+         description="List of LLMs to use for plan generation. Each LLM can generate one or more plans.")
+     plans_per_llm: int = Field(default=2, description="Number of plans each LLM should generate.")
+     max_temperature: float = Field(default=1.0,
+                                    description="Maximum temperature to use for sampling when generating plans. "
+                                    "This can help control the randomness of the generated plans.")
+     min_temperature: float = Field(default=0.5,
+                                    description="Minimum temperature to use for sampling when generating plans. "
+                                    "This can help control the randomness of the generated plans.")
+     planning_template: str = Field(
+         default=("You are an expert reasoning model tasked with creating a detailed execution plan"
+                  " for a system that has the following information to get the result of a given input:\n\n"
+                  "**System Information:**\n {context}\n\n"
+                  "**Input:** \n{prompt}\n\n"
+                  "An example plan could look like this:\n\n"
+                  "1. Call tool A with input X\n"
+                  "2. Call tool B with input Y\n"
+                  "3. Interpret the output of tool A and B\n"
+                  "4. Return the final result"
+                  "\n\nBegin the final plan with PLAN:\n"),
+         description="The template to use for generating plans.")
+
+     @model_validator(mode="before")
+     def validate_multi_llm_strategies(cls, values: dict) -> dict:
+         if not values.get('llms'):
+             raise ValueError('Must provide at least one LLMRef in `llms` for the multi-LLM strategy.')
+         return values
+
+
+ class MultiQueryRetrievalSearchConfig(TTCStrategyBaseConfig, name="multi_query_retrieval_search"):
+     """
+     Configuration for the MultiQueryRetrievalSearch strategy.
+     This strategy generates multiple new 'TTCItem's per original item,
+     each containing a differently phrased or re-focused version of the original task.
+     """
+     llms: list[LLMRef] = Field(default_factory=list,
+                                description="List of LLM references to use for generating diverse queries.")
+
+     query_generation_template: str = Field(
+         default=("You are an expert at re-framing a user's query to encourage new solution paths. "
+                  "Given the task description and an optional motivation, produce a short alternative query "
+                  "that addresses the same task from a different angle. By generating multiple "
+                  "perspectives on the task, your goal is to help "
+                  "the user overcome some of the limitations of distance-based similarity search.\n\n"
+                  "Task: {task}\n"
+                  "Motivation: {motivation}\n\n"
+                  "Output a concise new query statement below. Only output the revised query and nothing else.\n"),
+         description="Prompt template for rewriting the task from a different perspective.")
+
+     @model_validator(mode="before")
+     def validate_llms(cls, values):
+         if not values.get('llms'):
+             raise ValueError("At least one LLMRef must be provided for multi_query_retrieval_search.")
+         return values
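The min_temperature/max_temperature fields above bound the sampling temperatures used when generating several plans. One plausible way a planner could spread temperatures evenly across num_plans is sketched below; this is an illustration of the config semantics, not the package's actual planner code.

def spread_temperatures(min_temperature: float, max_temperature: float, num_plans: int) -> list[float]:
    # Evenly space one sampling temperature per plan between the configured bounds.
    if num_plans == 1:
        return [min_temperature]
    step = (max_temperature - min_temperature) / (num_plans - 1)
    return [min_temperature + i * step for i in range(num_plans)]

print(spread_temperatures(0.5, 1.0, 4))  # approximately [0.5, 0.667, 0.833, 1.0]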
nat/experimental/test_time_compute/models/selection_config.py
@@ -0,0 +1,154 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import typing
+
+ from pydantic import Field
+ from pydantic import model_validator
+
+ from nat.data_models.component_ref import LLMRef
+ from nat.data_models.ttc_strategy import TTCStrategyBaseConfig
+
+
+ class LLMBasedPlanSelectionConfig(TTCStrategyBaseConfig, name="llm_based_plan_selection"):
+     """
+     Configuration for LLM-based plan selection.
+     """
+     selection_llm: LLMRef | typing.Any | None = Field(
+         default=None,
+         description="The LLM to use for selecting the best plan. This can be an instance of an LLM client.")
+
+     selection_template: str = Field(
+         default=("You are tasked with selecting the best plan from several alternative plans."
+                  " Review the following plans and their feedback carefully to select the most "
+                  "comprehensive, efficient, and effective one. "
+                  "The plan is for an agent system with the following objective and context:\n\n"
+                  "{context}\n\n"
+                  "The system is asked to achieve the following goal:\n\n"
+                  "{original_prompt}\n\n"
+                  "The generated plans are as follows."
+                  "\n\n{plans}"
+                  "\n\nBased on your analysis, which plan (numbered 1 and onwards) is the best? "
+                  "Provide a thorough explanation of your choice,"
+                  " referencing specific strengths from the feedback and how they outweigh any weaknesses. "
+                  "Make sure you begin your choice of selected plan with the words 'SELECTED PLAN:' "
+                  "followed by the plan number."),
+         description="The template to use for selecting the best plan. This should guide the LLM on how to evaluate "
+         "the plans and select the best one. Ensure it is clear and concise.")
+
+     @model_validator(mode="before")
+     def validate_strategies(cls, values: dict[str, typing.Any]) -> dict[str, typing.Any]:
+         """
+         Ensure that the selection_llm is provided when using LLM-based selection.
+         """
+         if values.get('selection_llm') is None:
+             raise ValueError('selection_llm must be provided when'
+                              ' selection_strategy is set to LLM_BASED_PLAN_SELECTION.')
+
+         return values
+
+
+ class LLMBasedAgentOutputSelectionConfig(TTCStrategyBaseConfig, name="llm_based_agent_output_selection"):
+     """
+     Configuration for LLM-based agent output selection.
+     """
+     selection_llm: LLMRef | typing.Any | None = Field(
+         default=None,
+         description="The LLM to use for selecting the best output. This can be an instance of an LLM client.")
+
+     selection_template: str = Field(
+         default=("You are tasked with selecting the best output from several outputs. "
+                  "The outputs are from an agent system whose objective and input will be provided below.\n "
+                  "Review all the outputs and select the one that fits best. You will do this by "
+                  "looking at how many outputs have the same classification. Choose the one that has the most. "
+                  "Of the ones that have the same classification, choose the one that is the most complete, "
+                  "clear, and comprehensive. The objective of the agent is: \n"
+                  "{objective}\n\n"
+                  "\n\nThe agent is asked to achieve the following goal:\n\n"
+                  "{input}\n\n"
+                  "The generated outputs are as follows."
+                  "\n\n{results}"
+                  "\n\nBased on your analysis, which output (numbered 1 and onwards) is the best? "
+                  "Provide a thorough explanation of your choice,"
+                  " referencing specific strengths from the feedback and how they outweigh any weaknesses. "
+                  "You must ALWAYS select an option, even if the options are identical or similar. "
+                  "Make sure you begin your choice of selected output with the words 'SELECTED ITEM:' "
+                  "followed by the output number."),
+         description="The template to use for selecting the best output. This should guide the LLM on how to evaluate "
+         "the outputs and select the best one. Ensure it is clear and concise. Must contain {objective}, "
+         "{input}, and {results}.")
+
+     @model_validator(mode="before")
+     def validate_strategies(cls, values: dict[str, typing.Any]) -> dict[str, typing.Any]:
+         """
+         Ensure that the selection_llm is provided when using LLM-based selection.
+         """
+         if values.get('selection_llm') is None:
+             raise ValueError('selection_llm must be provided when '
+                              'selection_strategy is set to LLM_BASED_AGENT_OUTPUT_SELECTION.')
+
+         return values
+
+
+ class LLMBasedOutputMergingConfig(TTCStrategyBaseConfig, name="llm_based_agent_output_merging"):
+     """
+     Configuration for LLM-based agent output merging.
+     """
+     selection_llm: LLMRef | typing.Any | None = Field(
+         default=None,
+         description="The LLM to use for merging the outputs. This can be an instance of an LLM client.")
+
+     selection_template: str = Field(
+         default=("You are tasked with merging the output of an agent system that produces {pipeline_type}. "
+                  "The outputs are from an agent system whose objective and input will be provided below.\n "
+                  "Review all the outputs and combine them into one output, keeping with the intended structure "
+                  "generated by the outputs and general tone. Capture the important pieces of each of the outputs "
+                  "to create a comprehensive output that achieves the input and objective. "
+                  "The objective of the agent is: \n"
+                  "{objective}\n\n"
+                  "\n\nThe agent is asked to achieve the following goal:\n\n"
+                  "{input}\n\n"
+                  "The generated outputs are as follows."
+                  "\n\n{results}"
+                  "\n\n Make sure you begin your updated output with the words 'MERGED OUTPUT:' "),
+         description="The template to use for merging the outputs. This should guide the LLM on how to combine "
+         "the outputs into a single comprehensive output. Ensure it is clear and concise. Must contain {objective}, "
+         "{input}, and {results}.")
+
+     @model_validator(mode="before")
+     def validate_strategies(cls, values: dict[str, typing.Any]) -> dict[str, typing.Any]:
+         """
+         Ensure that the selection_llm is provided when using LLM-based merging.
+         """
+         if values.get('selection_llm') is None:
+             raise ValueError('selection_llm must be provided when '
+                              'selection_strategy is set to LLM_BASED_AGENT_OUTPUT_SELECTION.')
+
+         return values
+
+
+ class ThresholdSelectionConfig(TTCStrategyBaseConfig, name="threshold_selection"):
+     """
+     Configuration for a selection strategy that keeps only the items
+     whose scores exceed a specified threshold.
+     """
+     threshold: float = Field(default=5.0, description="Only keep TTCItems with score >= this value.")
+
+
+ class BestOfNSelectionConfig(TTCStrategyBaseConfig, name="best_of_n_selection"):
+     """
+     Configuration for Best of N Selection.
+     """
+     pass
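ThresholdSelectionConfig keeps only the items whose score meets its threshold. A minimal sketch of that filtering rule over hypothetical (item, score) pairs, independent of the actual TTCItem model used by the selectors:

def select_above_threshold(scored_items: list[tuple[str, float]], threshold: float = 5.0) -> list[str]:
    # Keep only the items whose score is >= the configured threshold.
    return [item for item, score in scored_items if score >= threshold]

print(select_above_threshold([("plan-a", 7.2), ("plan-b", 4.1), ("plan-c", 5.0)]))  # ['plan-a', 'plan-c']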