aiqtoolkit 1.2.0rc2__py3-none-any.whl → 1.2.0rc3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of aiqtoolkit has been flagged as potentially problematic.
Files changed (30)
  1. aiq/agent/base.py +8 -7
  2. aiq/agent/react_agent/agent.py +2 -3
  3. aiq/agent/react_agent/register.py +1 -1
  4. aiq/agent/reasoning_agent/reasoning_agent.py +2 -1
  5. aiq/agent/tool_calling_agent/register.py +2 -1
  6. aiq/builder/function.py +21 -6
  7. aiq/builder/function_base.py +6 -2
  8. aiq/cli/commands/sizing/calc.py +6 -3
  9. aiq/cli/commands/start.py +0 -5
  10. aiq/cli/commands/uninstall.py +2 -4
  11. aiq/data_models/api_server.py +6 -12
  12. aiq/front_ends/console/console_front_end_plugin.py +2 -22
  13. aiq/front_ends/simple_base/simple_front_end_plugin_base.py +4 -2
  14. aiq/observability/exporter/processing_exporter.py +99 -46
  15. aiq/observability/exporter/span_exporter.py +1 -0
  16. aiq/observability/processor/batching_processor.py +52 -59
  17. aiq/observability/processor/callback_processor.py +42 -0
  18. aiq/observability/processor/processor.py +4 -1
  19. aiq/profiler/calc/calc_runner.py +5 -1
  20. aiq/profiler/calc/data_models.py +18 -6
  21. aiq/runtime/loader.py +2 -2
  22. aiq/tool/server_tools.py +1 -1
  23. aiq/utils/type_converter.py +52 -10
  24. {aiqtoolkit-1.2.0rc2.dist-info → aiqtoolkit-1.2.0rc3.dist-info}/METADATA +1 -1
  25. {aiqtoolkit-1.2.0rc2.dist-info → aiqtoolkit-1.2.0rc3.dist-info}/RECORD +30 -29
  26. {aiqtoolkit-1.2.0rc2.dist-info → aiqtoolkit-1.2.0rc3.dist-info}/WHEEL +0 -0
  27. {aiqtoolkit-1.2.0rc2.dist-info → aiqtoolkit-1.2.0rc3.dist-info}/entry_points.txt +0 -0
  28. {aiqtoolkit-1.2.0rc2.dist-info → aiqtoolkit-1.2.0rc3.dist-info}/licenses/LICENSE-3rd-party.txt +0 -0
  29. {aiqtoolkit-1.2.0rc2.dist-info → aiqtoolkit-1.2.0rc3.dist-info}/licenses/LICENSE.md +0 -0
  30. {aiqtoolkit-1.2.0rc2.dist-info → aiqtoolkit-1.2.0rc3.dist-info}/top_level.txt +0 -0
aiq/agent/base.py CHANGED
@@ -148,7 +148,7 @@ class BaseAgent(ABC):
         """
         last_exception = None
 
-        for attempt in range(max_retries + 1):
+        for attempt in range(1, max_retries + 1):
            try:
                response = await tool.ainvoke(tool_input, config=config)
 
@@ -162,17 +162,18 @@ class BaseAgent(ABC):
 
            except Exception as e:
                last_exception = e
-               logger.warning("%s Tool call attempt %d/%d failed for tool %s: %s",
-                              AGENT_LOG_PREFIX,
-                              attempt + 1,
-                              max_retries + 1,
-                              tool.name,
-                              str(e))
 
                # If this was the last attempt, don't sleep
                if attempt == max_retries:
                    break
 
+               logger.warning("%s Tool call attempt %d/%d failed for tool %s: %s",
+                              AGENT_LOG_PREFIX,
+                              attempt,
+                              max_retries,
+                              tool.name,
+                              str(e))
+
                # Exponential backoff: 2^attempt seconds
                sleep_time = 2**attempt
                logger.debug("%s Retrying tool call for %s in %d seconds...", AGENT_LOG_PREFIX, tool.name, sleep_time)
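The two hunks above switch the retry loop to 1-based attempt numbering and only log the warning when another attempt will actually follow, so the final failure is no longer reported as a retry. A minimal sketch of the resulting control flow (the `call_with_retries` helper is hypothetical, not aiqtoolkit API):

```python
import asyncio

async def call_with_retries(fn, max_retries: int = 3):
    """Hypothetical helper mirroring the retry semantics above."""
    last_exception = None
    for attempt in range(1, max_retries + 1):  # attempts numbered 1..max_retries
        try:
            return await fn()
        except Exception as e:
            last_exception = e
            if attempt == max_retries:
                break  # last attempt: no retry warning, no sleep
            print(f"attempt {attempt}/{max_retries} failed: {e}")
            await asyncio.sleep(2 ** attempt)  # exponential backoff: 2s, 4s, ...
    if last_exception is not None:
        raise last_exception
```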
aiq/agent/react_agent/agent.py CHANGED
@@ -193,12 +193,11 @@ class ReActAgentGraph(DualNodeAgent):
                       ex.observation,
                       output_message.content)
            if attempt == self.parse_agent_response_max_retries:
-               logger.error(
+               logger.warning(
                    "%s Failed to parse agent output after %d attempts, consider enabling or "
                    "increasing parse_agent_response_max_retries",
                    AGENT_LOG_PREFIX,
-                   attempt,
-                   exc_info=True)
+                   attempt)
            # the final answer goes in the "messages" state channel
            combined_content = str(ex.observation) + '\n' + str(output_message.content)
            output_message.content = combined_content
aiq/agent/react_agent/register.py CHANGED
@@ -18,7 +18,6 @@ import logging
 from pydantic import AliasChoices
 from pydantic import Field
 
-from aiq.agent.base import AGENT_LOG_PREFIX
 from aiq.builder.builder import Builder
 from aiq.builder.framework_enum import LLMFrameworkEnum
 from aiq.builder.function_info import FunctionInfo
@@ -79,6 +78,7 @@ async def react_agent_workflow(config: ReActAgentWorkflowConfig, builder: Builde
    from langchain_core.messages import trim_messages
    from langgraph.graph.graph import CompiledGraph
 
+   from aiq.agent.base import AGENT_LOG_PREFIX
    from aiq.agent.react_agent.agent import ReActAgentGraph
    from aiq.agent.react_agent.agent import ReActGraphState
    from aiq.agent.react_agent.agent import create_react_agent_prompt
aiq/agent/reasoning_agent/reasoning_agent.py CHANGED
@@ -19,7 +19,6 @@ from collections.abc import AsyncGenerator
 
 from pydantic import Field
 
-from aiq.agent.base import AGENT_LOG_PREFIX
 from aiq.builder.builder import Builder
 from aiq.builder.framework_enum import LLMFrameworkEnum
 from aiq.builder.function_info import FunctionInfo
@@ -86,6 +85,8 @@ async def build_reasoning_function(config: ReasoningFunctionConfig, builder: Bui
    from langchain_core.language_models import BaseChatModel
    from langchain_core.prompts import PromptTemplate
 
+   from aiq.agent.base import AGENT_LOG_PREFIX
+
    def remove_r1_think_tags(text: str):
        pattern = r'(<think>)?.*?</think>\s*(.*)'
aiq/agent/tool_calling_agent/register.py CHANGED
@@ -17,7 +17,6 @@ import logging
 
 from pydantic import Field
 
-from aiq.agent.base import AGENT_LOG_PREFIX
 from aiq.builder.builder import Builder
 from aiq.builder.framework_enum import LLMFrameworkEnum
 from aiq.builder.function_info import FunctionInfo
@@ -49,6 +48,8 @@ async def tool_calling_agent_workflow(config: ToolCallAgentWorkflowConfig, build
    from langchain_core.messages.human import HumanMessage
    from langgraph.graph.graph import CompiledGraph
 
+   from aiq.agent.base import AGENT_LOG_PREFIX
+
    from .agent import ToolCallAgentGraph
    from .agent import ToolCallAgentGraphState
 
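The register.py hunks above (ReAct, reasoning, and tool-calling agents) all make the same change: `AGENT_LOG_PREFIX` moves from a module-level import into the body of the registered workflow function. That keeps plugin discovery cheap, which is exactly what the loader.py change later in this diff nags about. A stdlib-only sketch of the pattern (names are illustrative, not aiqtoolkit API):

```python
import time

def build_workflow():
    """Hypothetical registered workflow builder."""
    # Deferred import: the cost is paid only when the workflow is built,
    # not when the plugin module is imported during discovery.
    import json  # stand-in for a heavy dependency such as langgraph
    return json.dumps({"built": True})

start = time.perf_counter()
# Importing/registering the plugin module stays fast...
print(f"registration overhead: {(time.perf_counter() - start) * 1000:.3f} ms")
# ...and the heavy import happens here instead.
print(build_workflow())
```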
aiq/builder/function.py CHANGED
@@ -76,11 +76,16 @@ class Function(FunctionBase[InputT, StreamingOutputT, SingleOutputT], ABC):
        -------
        _T
            The converted value.
+
+       Raises
+       ------
+       ValueError
+           If the value cannot be converted to the specified type (when `to_type` is specified).
        """
 
        return self._converter.convert(value, to_type=to_type)
 
-   def try_convert(self, value: typing.Any, to_type: type[_T]) -> _T:
+   def try_convert(self, value: typing.Any, to_type: type[_T]) -> _T | typing.Any:
        """
        Converts the given value to the specified type using graceful error handling.
        If conversion fails, returns the original value and continues processing.
@@ -94,7 +99,7 @@ class Function(FunctionBase[InputT, StreamingOutputT, SingleOutputT], ABC):
 
        Returns
        -------
-       _T
+       _T | typing.Any
            The converted value, or original value if conversion fails.
        """
        return self._converter.try_convert(value, to_type=to_type)
@@ -129,17 +134,22 @@ class Function(FunctionBase[InputT, StreamingOutputT, SingleOutputT], ABC):
        -------
        typing.Any
            The output of the function optionally converted to the specified type.
+
+       Raises
+       ------
+       ValueError
+           If the output of the function cannot be converted to the specified type.
        """
 
        with self._context.push_active_function(self.instance_name,
                                                input_data=value) as manager:  # Set the current invocation context
            try:
-               converted_input: InputT = self._convert_input(value)  # type: ignore
+               converted_input: InputT = self._convert_input(value)
 
                result = await self._ainvoke(converted_input)
 
                if to_type is not None and not isinstance(result, to_type):
-                   result = self._converter.try_convert(result, to_type=to_type)
+                   result = self.convert(result, to_type)
 
                manager.set_output(result)
 
@@ -215,18 +225,23 @@ class Function(FunctionBase[InputT, StreamingOutputT, SingleOutputT], ABC):
        ------
        typing.Any
            The output of the function optionally converted to the specified type.
+
+       Raises
+       ------
+       ValueError
+           If the output of the function cannot be converted to the specified type (when `to_type` is specified).
        """
 
        with self._context.push_active_function(self.instance_name, input_data=value) as manager:
            try:
-               converted_input: InputT = self._convert_input(value)  # type: ignore
+               converted_input: InputT = self._convert_input(value)
 
                # Collect streaming outputs to capture the final result
                final_output: list[typing.Any] = []
 
                async for data in self._astream(converted_input):
                    if to_type is not None and not isinstance(data, to_type):
-                       converted_data = self._converter.try_convert(data, to_type=to_type)
+                       converted_data = self.convert(data, to_type=to_type)
                        final_output.append(converted_data)
                        yield converted_data
                    else:
aiq/builder/function_base.py CHANGED
@@ -350,7 +350,7 @@ class FunctionBase(typing.Generic[InputT, StreamingOutputT, SingleOutputT], ABC)
        # output because the ABC has it.
        return True
 
-   def _convert_input(self, value: typing.Any):
+   def _convert_input(self, value: typing.Any) -> InputT:
        if (isinstance(value, self.input_class)):
            return value
 
@@ -373,4 +373,8 @@ class FunctionBase(typing.Generic[InputT, StreamingOutputT, SingleOutputT], ABC)
            return value
 
        # Fallback to the converter
-       return self._converter.try_convert(value, to_type=self.input_class)
+       try:
+           return self._converter.convert(value, to_type=self.input_class)
+       except ValueError as e:
+           # Input parsing should yield a TypeError instead of a ValueError
+           raise TypeError from e
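Taken together, the function.py and function_base.py hunks tighten the conversion contract: output conversion now goes through the strict `convert` (which raises ValueError) instead of `try_convert`, and input-conversion failures are re-raised as TypeError. A hedged sketch of that split, with a hypothetical `SketchConverter` standing in for the real TypeConverter:

```python
from typing import Any, TypeVar

_T = TypeVar("_T")

class SketchConverter:
    """Hypothetical stand-in for aiq.utils.type_converter.TypeConverter."""

    def convert(self, value: Any, to_type: type[_T]) -> _T:
        """Strict path: raise ValueError when no conversion is possible."""
        if isinstance(value, to_type):
            return value
        try:
            return to_type(value)  # e.g. int("42")
        except Exception as e:
            raise ValueError(f"Cannot convert {type(value)} to {to_type}") from e

    def try_convert(self, value: Any, to_type: type[_T]):
        """Graceful path: return the original value on failure."""
        try:
            return self.convert(value, to_type)
        except ValueError:
            return value

conv = SketchConverter()
print(conv.convert("42", int))       # 42 (strict path succeeds)
print(conv.try_convert("abc", int))  # 'abc' (graceful path returns the original)
try:
    conv.convert("abc", int)         # strict path raises ValueError...
except ValueError as e:
    print("input parsing would re-raise as TypeError:", e)
```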
aiq/cli/commands/sizing/calc.py CHANGED
@@ -274,9 +274,12 @@ def calc_command(ctx,
 
    click.echo(tabulate(table, headers=headers, tablefmt="github"))
 
-   # Display slope-based GPU estimates at the end
-   click.echo("")  # Add blank line for separation
-   click.echo(click.style("=== GPU ESTIMATES ===", fg="bright_blue", bold=True))
+   # Display slope-based GPU estimates if they are available
+   if results.gpu_estimates.gpu_estimate_by_llm_latency is not None or \
+      results.gpu_estimates.gpu_estimate_by_wf_runtime is not None:
+       click.echo("")
+       click.echo(click.style("=== GPU ESTIMATES ===", fg="bright_blue", bold=True))
+
    if results.gpu_estimates.gpu_estimate_by_wf_runtime is not None:
        click.echo(
            click.style(
aiq/cli/commands/start.py CHANGED
@@ -190,11 +190,6 @@ class StartCommandGroup(click.Group):
        # Override default front end config with values from the config file for serverless execution modes.
        # Check that we have the right kind of front end
        if (not isinstance(config.general.front_end, front_end.config_type)):
-           logger.warning(
-               "The front end type in the config file (%s) does not match the command name (%s). "
-               "Overwriting the config file front end.",
-               config.general.front_end.type,
-               cmd_name)
 
            # Set the front end config
            config.general.front_end = front_end.config_type()
aiq/cli/commands/uninstall.py CHANGED
@@ -53,13 +53,11 @@ async def uninstall_packages(packages: list[dict[str, str]]) -> None:
        await stack.enter_async_context(registry_handler.remove(packages=package_name_list))
 
 
-@click.group(name=__name__,
-             invoke_without_command=True,
-             help=("Uninstall an AIQ Toolkit plugin packages from the local environment."))
+@click.group(name=__name__, invoke_without_command=True, help=("Uninstall plugin packages from the local environment."))
@click.argument("packages", type=str)
def uninstall_command(packages: str) -> None:
    """
-   Uninstall AIQ Toolkit plugin packages from the local environment.
+   Uninstall plugin packages from the local environment.
    """
 
    packages = packages.split()
aiq/data_models/api_server.py CHANGED
@@ -121,28 +121,22 @@ class AIQChatRequest(BaseModel):
    # Optional fields (OpenAI Chat Completions API compatible)
    model: str | None = Field(default=None, description="name of the model to use")
    frequency_penalty: float | None = Field(default=0.0,
-                                           ge=-2.0,
-                                           le=2.0,
                                            description="Penalty for new tokens based on frequency in text")
    logit_bias: dict[str, float] | None = Field(default=None,
                                                description="Modify likelihood of specified tokens appearing")
    logprobs: bool | None = Field(default=None, description="Whether to return log probabilities")
-   top_logprobs: int | None = Field(default=None, ge=0, le=20, description="Number of most likely tokens to return")
-   max_tokens: int | None = Field(default=None, ge=1, description="Maximum number of tokens to generate")
-   n: int | None = Field(default=1, ge=1, le=128, description="Number of chat completion choices to generate")
-   presence_penalty: float | None = Field(default=0.0,
-                                          ge=-2.0,
-                                          le=2.0,
-                                          description="Penalty for new tokens based on presence in text")
+   top_logprobs: int | None = Field(default=None, description="Number of most likely tokens to return")
+   max_tokens: int | None = Field(default=None, description="Maximum number of tokens to generate")
+   n: int | None = Field(default=1, description="Number of chat completion choices to generate")
+   presence_penalty: float | None = Field(default=0.0, description="Penalty for new tokens based on presence in text")
    response_format: dict[str, typing.Any] | None = Field(default=None, description="Response format specification")
    seed: int | None = Field(default=None, description="Random seed for deterministic sampling")
    service_tier: typing.Literal["auto", "default"] | None = Field(default=None,
                                                                   description="Service tier for the request")
-   stop: str | list[str] | None = Field(default=None, description="Up to 4 sequences where API will stop generating")
    stream: bool | None = Field(default=False, description="Whether to stream partial message deltas")
    stream_options: dict[str, typing.Any] | None = Field(default=None, description="Options for streaming")
-   temperature: float | None = Field(default=1.0, ge=0.0, le=2.0, description="Sampling temperature between 0 and 2")
-   top_p: float | None = Field(default=None, ge=0.0, le=1.0, description="Nucleus sampling parameter")
+   temperature: float | None = Field(default=1.0, description="Sampling temperature between 0 and 2")
+   top_p: float | None = Field(default=None, description="Nucleus sampling parameter")
    tools: list[dict[str, typing.Any]] | None = Field(default=None, description="List of tools the model may call")
    tool_choice: str | dict[str, typing.Any] | None = Field(default=None, description="Controls which tool is called")
    parallel_tool_calls: bool | None = Field(default=True, description="Whether to enable parallel function calling")
aiq/front_ends/console/console_front_end_plugin.py CHANGED
@@ -15,12 +15,10 @@
 
 import asyncio
 import logging
-from io import StringIO
 
 import click
 from colorama import Fore
 
-from aiq.builder.workflow_builder import WorkflowBuilder
 from aiq.data_models.interactive import HumanPromptModelType
 from aiq.data_models.interactive import HumanResponse
 from aiq.data_models.interactive import HumanResponseText
@@ -61,27 +59,9 @@ class ConsoleFrontEndPlugin(SimpleFrontEndPluginBase[ConsoleFrontEndConfig]):
        if (not self.front_end_config.input_query and not self.front_end_config.input_file):
            raise click.UsageError("Must specify either --input_query or --input_file")
 
-   async def run(self):
-
-       # Must yield the workflow function otherwise it cleans up
-       async with WorkflowBuilder.from_config(config=self.full_config) as builder:
-
-           session_manager: AIQSessionManager = None
-
-           if logger.isEnabledFor(logging.INFO):
-               stream = StringIO()
-
-               self.full_config.print_summary(stream=stream)
-
-               click.echo(stream.getvalue())
-
-           workflow = builder.build()
-           session_manager = AIQSessionManager(workflow)
-
-           await self.run_workflow(session_manager)
-
-   async def run_workflow(self, session_manager: AIQSessionManager = None):
+   async def run_workflow(self, session_manager: AIQSessionManager):
 
+       assert session_manager is not None, "Session manager must be provided"
        runner_outputs = None
 
        if (self.front_end_config.input_query):
aiq/front_ends/simple_base/simple_front_end_plugin_base.py CHANGED
@@ -45,8 +45,10 @@ class SimpleFrontEndPluginBase(FrontEndBase[FrontEndConfigT], ABC):
 
                click.echo(stream.getvalue())
 
-           await self.run_workflow(builder.build())
+           workflow = builder.build()
+           session_manager = AIQSessionManager(workflow)
+           await self.run_workflow(session_manager)
 
    @abstractmethod
-   async def run_workflow(self, session_manager: AIQSessionManager = None):
+   async def run_workflow(self, session_manager: AIQSessionManager):
        pass
aiq/observability/exporter/processing_exporter.py CHANGED
@@ -17,6 +17,7 @@ import asyncio
 import logging
 from abc import abstractmethod
 from collections.abc import Coroutine
+from typing import Any
 from typing import Generic
 from typing import TypeVar
 
@@ -24,6 +25,7 @@ from aiq.builder.context import AIQContextState
 from aiq.data_models.intermediate_step import IntermediateStep
 from aiq.observability.exporter.base_exporter import BaseExporter
 from aiq.observability.mixin.type_introspection_mixin import TypeIntrospectionMixin
+from aiq.observability.processor.callback_processor import CallbackProcessor
 from aiq.observability.processor.processor import Processor
 from aiq.utils.type_utils import DecomposedType
 from aiq.utils.type_utils import override
@@ -89,6 +91,14 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
                                 self._processors[-1].output_type)
        self._processors.append(processor)
 
+       # Set up pipeline continuation callback for processors that support it
+       if isinstance(processor, CallbackProcessor):
+           # Create a callback that continues processing through the rest of the pipeline
+           async def pipeline_callback(item):
+               await self._continue_pipeline_after(processor, item)
+
+           processor.set_done_callback(pipeline_callback)
+
    def remove_processor(self, processor: Processor) -> None:
        """Remove a processor from the processing pipeline.
 
@@ -143,20 +153,82 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
        """Process item through all registered processors.
 
        Args:
-           item: The item to process (starts as PipelineInputT, can transform to PipelineOutputT)
+           item (PipelineInputT): The item to process (starts as PipelineInputT, can transform to PipelineOutputT)
+
+       Returns:
+           PipelineOutputT: The processed item after running through all processors
+       """
+       return await self._process_through_processors(self._processors, item)  # type: ignore
+
+   async def _process_through_processors(self, processors: list[Processor], item: Any) -> Any:
+       """Process an item through a list of processors.
+
+       Args:
+           processors (list[Processor]): List of processors to run the item through
+           item (Any): The item to process
 
        Returns:
            The processed item after running through all processors
        """
        processed_item = item
-       for processor in self._processors:
+       for processor in processors:
            try:
                processed_item = await processor.process(processed_item)
            except Exception as e:
                logger.error("Error in processor %s: %s", processor.__class__.__name__, e, exc_info=True)
-               # Continue with unprocessed item rather than failing the export
+               # Continue with unprocessed item rather than failing
+       return processed_item
+
+   async def _export_final_item(self, processed_item: Any, raise_on_invalid: bool = False) -> None:
+       """Export a processed item with proper type handling.
+
+       Args:
+           processed_item (Any): The item to export
+           raise_on_invalid (bool): If True, raise ValueError for invalid types instead of logging warning
+       """
+       if isinstance(processed_item, list):
+           if len(processed_item) > 0:
+               await self.export_processed(processed_item)
+           else:
+               logger.debug("Skipping export of empty batch")
+       elif isinstance(processed_item, self.output_class):
+           await self.export_processed(processed_item)
+       else:
+           if raise_on_invalid:
+               raise ValueError(f"Processed item {processed_item} is not a valid output type. "
+                                f"Expected {self.output_class} or list[{self.output_class}]")
+           logger.warning("Processed item %s is not a valid output type for export", processed_item)
+
+   async def _continue_pipeline_after(self, source_processor: Processor, item: Any) -> None:
+       """Continue processing an item through the pipeline after a specific processor.
 
-       return processed_item  # type: ignore
+       This is used when processors (like BatchingProcessor) need to inject items
+       back into the pipeline flow to continue through downstream processors.
+
+       Args:
+           source_processor (Processor): The processor that generated the item
+           item (Any): The item to continue processing through the remaining pipeline
+       """
+       try:
+           # Find the source processor's position
+           try:
+               source_index = self._processors.index(source_processor)
+           except ValueError:
+               logger.error("Source processor %s not found in pipeline", source_processor.__class__.__name__)
+               return
+
+           # Process through remaining processors (skip the source processor)
+           remaining_processors = self._processors[source_index + 1:]
+           processed_item = await self._process_through_processors(remaining_processors, item)
+
+           # Export the final result
+           await self._export_final_item(processed_item)
+
+       except Exception as e:
+           logger.error("Failed to continue pipeline processing after %s: %s",
+                        source_processor.__class__.__name__,
+                        e,
+                        exc_info=True)
 
    async def _export_with_processing(self, item: PipelineInputT) -> None:
        """Export an item after processing it through the pipeline.
@@ -169,20 +241,11 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
            final_item: PipelineOutputT = await self._process_pipeline(item)
 
            # Handle different output types from batch processors
-           if isinstance(final_item, list):
-               # Empty lists from batch processors should be skipped, not exported
-               if len(final_item) == 0:
-                   logger.debug("Skipping export of empty batch from processor pipeline")
-                   return
-
-               # Non-empty lists should be exported (batch processors)
-               await self.export_processed(final_item)
-           elif isinstance(final_item, self.output_class):
-               # Single items should be exported normally
-               await self.export_processed(final_item)
-           else:
-               raise ValueError(f"Processed item {final_item} is not a valid output type. "
-                                f"Expected {self.output_class} or list[{self.output_class}]")
+           if isinstance(final_item, list) and len(final_item) == 0:
+               logger.debug("Skipping export of empty batch from processor pipeline")
+               return
+
+           await self._export_final_item(final_item, raise_on_invalid=True)
 
        except Exception as e:
            logger.error("Failed to export item '%s': %s", item, e, exc_info=True)
@@ -235,35 +298,25 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
 
    @override
    async def _cleanup(self):
-       """Enhanced cleanup that shuts down all shutdown-aware processors."""
+       """Enhanced cleanup that shuts down all shutdown-aware processors.
+
+       Each processor is responsible for its own cleanup, including routing
+       any final batches through the remaining pipeline via their done callbacks.
+       """
        # Shutdown all processors that support it
-       if hasattr(self, '_processors'):
-           shutdown_tasks = []
-           for processor in getattr(self, '_processors', []):
-               if hasattr(processor, 'shutdown'):
-                   logger.debug("Shutting down processor: %s", processor.__class__.__name__)
-                   shutdown_tasks.append(processor.shutdown())
-
-           if shutdown_tasks:
-               try:
-                   await asyncio.gather(*shutdown_tasks, return_exceptions=True)
-                   logger.info("Successfully shut down %d processors", len(shutdown_tasks))
-               except Exception as e:
-                   logger.error("Error shutting down processors: %s", e, exc_info=True)
-
-       # Process final batches from batch processors
-       for processor in getattr(self, '_processors', []):
-           if hasattr(processor, 'has_final_batch') and hasattr(processor, 'get_final_batch'):
-               if processor.has_final_batch():
-                   final_batch = processor.get_final_batch()
-                   if final_batch:
-                       logger.info("Processing final batch of %d items from %s during cleanup",
-                                   len(final_batch),
-                                   processor.__class__.__name__)
-                       try:
-                           await self.export_processed(final_batch)
-                       except Exception as e:
-                           logger.error("Error processing final batch during cleanup: %s", e, exc_info=True)
+       shutdown_tasks = []
+       for processor in getattr(self, '_processors', []):
+           shutdown_method = getattr(processor, 'shutdown', None)
+           if shutdown_method:
+               logger.debug("Shutting down processor: %s", processor.__class__.__name__)
+               shutdown_tasks.append(shutdown_method())
+
+       if shutdown_tasks:
+           try:
+               await asyncio.gather(*shutdown_tasks, return_exceptions=True)
+               logger.info("Successfully shut down %d processors", len(shutdown_tasks))
+           except Exception as e:
+               logger.error("Error shutting down processors: %s", e, exc_info=True)
 
        # Call parent cleanup
        await super()._cleanup()
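The net effect of the processing_exporter.py hunks: a processor that emits items outside a normal `process()` call (a timed flush, a final shutdown batch) is handed a callback that re-enters the pipeline just past its own position, so downstream processors still run before export. A self-contained toy sketch of that wiring (simplified classes, not the aiqtoolkit types):

```python
import asyncio

class UpperCase:
    """Toy downstream processor."""
    async def process(self, items):
        return [s.upper() for s in items]

class TimedBatcher:
    """Toy callback-capable processor that can emit batches on its own."""
    def __init__(self):
        self._done_callback = None

    def set_done_callback(self, callback):
        self._done_callback = callback

    async def flush(self, batch):
        # e.g. a flush timer fired: hand the batch back to the pipeline
        await self._done_callback(batch)

class Exporter:
    def __init__(self):
        self._processors = []

    def add_processor(self, processor):
        self._processors.append(processor)
        if hasattr(processor, "set_done_callback"):
            async def pipeline_callback(item, source=processor):
                # Continue only through processors *after* the emitting one.
                idx = self._processors.index(source)
                for p in self._processors[idx + 1:]:
                    item = await p.process(item)
                print("exported:", item)
            processor.set_done_callback(pipeline_callback)

async def main():
    exporter = Exporter()
    batcher = TimedBatcher()
    exporter.add_processor(batcher)
    exporter.add_processor(UpperCase())
    await batcher.flush(["a", "b"])  # prints: exported: ['A', 'B']

asyncio.run(main())
```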
aiq/observability/exporter/span_exporter.py CHANGED
@@ -262,3 +262,4 @@ class SpanExporter(ProcessingExporter[InputSpanT, OutputSpanT], SerializeMixin):
        self._outstanding_spans.clear()  # type: ignore
        self._span_stack.clear()  # type: ignore
        self._metadata_stack.clear()  # type: ignore
+       await super()._cleanup()
aiq/observability/processor/batching_processor.py CHANGED
@@ -23,17 +23,17 @@ from typing import Any
 from typing import Generic
 from typing import TypeVar
 
-from aiq.observability.processor.processor import Processor
+from aiq.observability.processor.callback_processor import CallbackProcessor
 
 logger = logging.getLogger(__name__)
 
 T = TypeVar('T')
 
 
-class BatchingProcessor(Processor[T, list[T]], Generic[T]):
+class BatchingProcessor(CallbackProcessor[T, list[T]], Generic[T]):
    """Pass-through batching processor that accumulates items and outputs batched lists.
 
-   This processor fits properly into the generics design by implementing Processor[T, List[T]].
+   This processor extends CallbackProcessor[T, List[T]] to provide batching functionality.
    It accumulates individual items and outputs them as batches when size or time thresholds
    are met. The batched output continues through the processing pipeline.
 
@@ -43,25 +43,31 @@ class BatchingProcessor(Processor[T, list[T]], Generic[T]):
    Key Features:
    - Pass-through design: Processor[T, List[T]]
    - Size-based and time-based batching
-   - Fits into generics processing pipeline design
+   - Pipeline flow: batches continue through downstream processors
    - GUARANTEED: No items lost during cleanup
    - Comprehensive statistics and monitoring
    - Proper cleanup and shutdown handling
    - High-performance async implementation
    - Back-pressure handling with queue limits
 
+   Pipeline Flow:
+       Normal processing: Individual items → BatchingProcessor → List[items] → downstream processors → export
+       Time-based flush: Scheduled batches automatically continue through remaining pipeline
+       Shutdown: Final batch immediately routed through remaining pipeline
+
    Cleanup Guarantee:
-       When ProcessingExporter._cleanup() calls shutdown(), this processor:
+       When shutdown() is called, this processor:
        1. Stops accepting new items
-       2. Processes all queued items as final batch
-       3. Returns final batch to continue through pipeline
-       4. Ensures zero data loss during shutdown
+       2. Creates final batch from all queued items
+       3. Immediately routes final batch through remaining pipeline via callback
+       4. Ensures zero data loss with no external coordination needed
 
    Usage in Pipeline:
    ```python
-   # Individual spans → Batched spans → Continue processing
-   exporter.add_processor(BatchingProcessor[Span](batch_size=100))
-   exporter.add_processor(BatchedSpanProcessor())  # Processes List[Span]
+   # Individual spans → Batched spans → Continue through downstream processors
+   exporter.add_processor(BatchingProcessor[Span](batch_size=100))  # Auto-wired with pipeline callback
+   exporter.add_processor(FilterProcessor())  # Processes List[Span] from batching
+   exporter.add_processor(TransformProcessor())  # Further processing
    ```
 
    Args:
@@ -70,6 +76,10 @@ class BatchingProcessor(Processor[T, list[T]], Generic[T]):
        max_queue_size: Maximum items to queue before blocking (default: 1000)
        drop_on_overflow: If True, drop items when queue is full (default: False)
        shutdown_timeout: Max seconds to wait for final batch processing (default: 10.0)
+
+   Note:
+       The done_callback for pipeline integration is automatically set by ProcessingExporter
+       when the processor is added to a pipeline. For standalone usage, call set_done_callback().
    """
 
    def __init__(self,
@@ -77,14 +87,13 @@ class BatchingProcessor(Processor[T, list[T]], Generic[T]):
                 flush_interval: float = 5.0,
                 max_queue_size: int = 1000,
                 drop_on_overflow: bool = False,
-                shutdown_timeout: float = 10.0,
-                done_callback: Callable[[list[T]], Awaitable[None]] | None = None):
+                shutdown_timeout: float = 10.0):
        self._batch_size = batch_size
        self._flush_interval = flush_interval
        self._max_queue_size = max_queue_size
        self._drop_on_overflow = drop_on_overflow
        self._shutdown_timeout = shutdown_timeout
-       self._done_callback = done_callback
+       self._done_callback: Callable[[list[T]], Awaitable[None]] | None = None
 
        # Batching state
        self._batch_queue: deque[T] = deque()
@@ -93,11 +102,7 @@ class BatchingProcessor(Processor[T, list[T]], Generic[T]):
        self._batch_lock = asyncio.Lock()
        self._shutdown_requested = False
        self._shutdown_complete = False
-       self._shutdown_complete_event: asyncio.Event | None = None
-
-       # Final batch handling for cleanup
-       self._final_batch: list[T] | None = None
-       self._final_batch_processed = False
+       self._shutdown_complete_event = asyncio.Event()
 
        # Callback for immediate export of scheduled batches
        self._done = None
@@ -167,7 +172,11 @@ class BatchingProcessor(Processor[T, list[T]], Generic[T]):
        return []
 
    def set_done_callback(self, callback: Callable[[list[T]], Awaitable[None]]):
-       """Set callback function for immediate export of scheduled batches."""
+       """Set callback function for routing batches through the remaining pipeline.
+
+       This is automatically set by ProcessingExporter.add_processor() to continue
+       batches through downstream processors before final export.
+       """
        self._done_callback = callback
 
    async def _schedule_flush(self):
@@ -178,15 +187,15 @@ class BatchingProcessor(Processor[T, list[T]], Generic[T]):
        if not self._shutdown_requested and self._batch_queue:
            batch = await self._create_batch()
            if batch:
-               # Immediately export scheduled batches via callback
+               # Route scheduled batches through pipeline via callback
                if self._done_callback is not None:
                    try:
                        await self._done_callback(batch)
-                       logger.debug("Scheduled flush exported batch of %d items", len(batch))
+                       logger.debug("Scheduled flush routed batch of %d items through pipeline", len(batch))
                    except Exception as e:
-                       logger.error("Error exporting scheduled batch: %s", e, exc_info=True)
+                       logger.error("Error routing scheduled batch through pipeline: %s", e, exc_info=True)
                else:
-                   logger.warning("Scheduled flush created batch of %d items but no export callback set",
+                   logger.warning("Scheduled flush created batch of %d items but no pipeline callback set",
                                   len(batch))
    except asyncio.CancelledError:
        pass
@@ -223,11 +232,8 @@ class BatchingProcessor(Processor[T, list[T]], Generic[T]):
        """Shutdown the processor and ensure all items are processed.
 
        CRITICAL: This method is called by ProcessingExporter._cleanup() to ensure
-       no items are lost during shutdown. It creates a final batch from any
-       remaining items and stores it for processing.
-
-       The final batch will be processed by the next process() call or can be
-       retrieved via get_final_batch().
+       no items are lost during shutdown. It immediately routes any remaining
+       items as a final batch through the rest of the processing pipeline.
        """
        if self._shutdown_requested:
            logger.debug("Shutdown already requested, waiting for completion")
@@ -251,13 +257,26 @@ class BatchingProcessor(Processor[T, list[T]], Generic[T]):
        except asyncio.CancelledError:
            pass
 
-       # Create final batch from remaining items
+       # Create and route final batch through pipeline
        async with self._batch_lock:
            if self._batch_queue:
-               self._final_batch = await self._create_batch()
-               logger.info("Created final batch of %d items during shutdown", len(self._final_batch))
+               final_batch = await self._create_batch()
+               logger.info("Created final batch of %d items during shutdown", len(final_batch))
+
+               # Route final batch through pipeline via callback
+               if self._done_callback is not None:
+                   try:
+                       await self._done_callback(final_batch)
+                       logger.info("Successfully routed final batch of %d items through pipeline during shutdown",
+                                   len(final_batch))
+                   except Exception as e:
+                       logger.error("Error routing final batch through pipeline during shutdown: %s",
+                                    e,
+                                    exc_info=True)
+               else:
+                   logger.warning("Final batch of %d items created during shutdown but no pipeline callback set",
+                                  len(final_batch))
            else:
-               self._final_batch = []
                logger.info("No items remaining during shutdown")
 
        self._shutdown_complete = True
@@ -269,30 +288,6 @@ class BatchingProcessor(Processor[T, list[T]], Generic[T]):
        self._shutdown_complete = True
        self._shutdown_complete_event.set()
 
-   def get_final_batch(self) -> list[T]:
-       """Get the final batch created during shutdown.
-
-       This method allows the exporter to retrieve and process any items
-       that were queued when shutdown was called.
-
-       Returns:
-           List[T]: Final batch of items, empty list if none
-       """
-       if self._final_batch is not None:
-           final_batch = self._final_batch
-           self._final_batch = None  # Clear to avoid double processing
-           self._final_batch_processed = True
-           return final_batch
-       return []
-
-   def has_final_batch(self) -> bool:
-       """Check if there's a final batch waiting to be processed.
-
-       Returns:
-           bool: True if final batch exists and hasn't been processed
-       """
-       return self._final_batch is not None and not self._final_batch_processed
-
    def get_stats(self) -> dict[str, Any]:
        """Get comprehensive batching statistics."""
        return {
@@ -309,8 +304,6 @@ class BatchingProcessor(Processor[T, list[T]], Generic[T]):
            "shutdown_batches": self._shutdown_batches,
            "shutdown_requested": self._shutdown_requested,
            "shutdown_complete": self._shutdown_complete,
-           "final_batch_size": len(self._final_batch) if self._final_batch else 0,
-           "final_batch_processed": self._final_batch_processed,
            "avg_items_per_batch": self._items_processed / max(1, self._batches_created),
            "drop_rate": self._items_dropped / max(1, self._items_processed) * 100 if self._items_processed > 0 else 0
        }
aiq/observability/processor/callback_processor.py CHANGED
@@ -0,0 +1,42 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import abstractmethod
+from collections.abc import Awaitable
+from collections.abc import Callable
+from typing import Any
+from typing import TypeVar
+
+from aiq.observability.processor.processor import Processor
+
+InputT = TypeVar('InputT')
+OutputT = TypeVar('OutputT')
+
+
+class CallbackProcessor(Processor[InputT, OutputT]):
+    """Abstract base class for processors that support done callbacks.
+
+    Processors inheriting from this class can register callbacks that are
+    invoked when items are ready for further processing or export.
+    """
+
+    @abstractmethod
+    def set_done_callback(self, callback: Callable[[Any], Awaitable[None]]) -> None:
+        """Set a callback function to be invoked when items are processed.
+
+        Args:
+            callback (Callable[[Any], Awaitable[None]]): Function to call with processed items
+        """
+        pass
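For third-party processors, the contract in this new file is small: implement `process()` plus `set_done_callback()`. A hedged sketch of a custom subclass (requires aiqtoolkit installed; `DedupingProcessor` is illustrative and assumes no further abstract hooks beyond those shown in this diff):

```python
from collections.abc import Awaitable
from collections.abc import Callable

from aiq.observability.processor.callback_processor import CallbackProcessor


class DedupingProcessor(CallbackProcessor[str, str]):
    """Illustrative processor that pushes first occurrences via the done callback."""

    def __init__(self):
        self._seen: set[str] = set()
        self._done_callback: Callable[[str], Awaitable[None]] | None = None

    def set_done_callback(self, callback: Callable[[str], Awaitable[None]]) -> None:
        self._done_callback = callback

    async def process(self, item: str) -> str:
        if item not in self._seen:
            self._seen.add(item)
            if self._done_callback is not None:
                await self._done_callback(item)  # notify the pipeline continuation
        return item
```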
aiq/observability/processor/processor.py CHANGED
@@ -63,6 +63,9 @@ class Processor(Generic[InputT, OutputT], TypeIntrospectionMixin, ABC):
        """Process an item and return a potentially different type.
 
        Args:
-           item: The item to process
+           item (InputT): The item to process
+
+       Returns:
+           OutputT: The processed item
        """
        pass
aiq/profiler/calc/calc_runner.py CHANGED
@@ -34,6 +34,7 @@ from aiq.profiler.calc.data_models import CalcData
 from aiq.profiler.calc.data_models import CalcRunnerConfig
 from aiq.profiler.calc.data_models import CalcRunnerOutput
 from aiq.profiler.calc.data_models import FitConfig
+from aiq.profiler.calc.data_models import FitResults
 from aiq.profiler.calc.data_models import GPUEstimates
 from aiq.profiler.calc.data_models import SizingMetricPerItem
 from aiq.profiler.calc.data_models import SizingMetrics
@@ -408,7 +409,10 @@ class CalcRunner:
        if gpu_estimates.gpu_estimate_by_llm_latency is not None:
            logger.info("GPU estimate by LLM latency: %.2f", gpu_estimates.gpu_estimate_by_llm_latency)
 
-       return CalcRunnerOutput(gpu_estimates=gpu_estimates, calc_data=calc_data)
+       return CalcRunnerOutput(gpu_estimates=gpu_estimates,
+                               calc_data=calc_data,
+                               fit_results=FitResults(llm_latency_fit=self.linear_analyzer.llm_latency_fit,
+                                                      wf_runtime_fit=self.linear_analyzer.wf_runtime_fit))
 
    def plot_concurrency_vs_time_metrics(self, output_dir: Path):
        """Plots concurrency vs. time metrics using pre-computed fits."""
aiq/profiler/calc/data_models.py CHANGED
@@ -17,6 +17,7 @@ import typing
 from pathlib import Path
 
 from pydantic import BaseModel
+from pydantic import Field
 
 
 class FitConfig(BaseModel):
@@ -76,7 +77,7 @@ class CalcRunnerConfig(BaseModel):
    plot_data: bool = True
 
    # Configuration for linear fit and outlier detection
-   fit_config: FitConfig = FitConfig()
+   fit_config: FitConfig = Field(default_factory=FitConfig)
 
 
# Sizing metrics are gathered from the evaluation runs and used as input by the calculator.
@@ -103,7 +104,7 @@ class SizingMetrics(BaseModel):
    Sizing metrics for a single concurrency.
    """
    # alerts associated with the sizing metrics
-   alerts: SizingMetricsAlerts = SizingMetricsAlerts()
+   alerts: SizingMetricsAlerts = Field(default_factory=SizingMetricsAlerts)
 
    # p95 LLM latency
    llm_latency_p95: float = 0.0
@@ -125,6 +126,14 @@ class LinearFitResult(BaseModel):
    outliers_removed: list[int]
 
 
+class FitResults(BaseModel):
+    """
+    Linear fit results for both LLM latency and workflow runtime analysis.
+    """
+    llm_latency_fit: LinearFitResult | None = None
+    wf_runtime_fit: LinearFitResult | None = None
+
+
# GPU estimates are generated by the calculator.
class GPUEstimates(BaseModel):
    """
@@ -158,11 +167,11 @@ class CalcData(BaseModel):
    """
    # ROUGH GPU estimates per concurrency: these are not used for the final GPU estimation
    # they are only available for information purposes
-   gpu_estimates: GPUEstimates = GPUEstimates()
+   gpu_estimates: GPUEstimates = Field(default_factory=GPUEstimates)
    # Calc runner alerts
-   alerts: CalcAlerts = CalcAlerts()
+   alerts: CalcAlerts = Field(default_factory=CalcAlerts)
    # Sizing metrics
-   sizing_metrics: SizingMetrics = SizingMetrics()
+   sizing_metrics: SizingMetrics = Field(default_factory=SizingMetrics)
 
 
class CalcRunnerOutput(BaseModel):
@@ -170,7 +179,10 @@ class CalcRunnerOutput(BaseModel):
    Output of the calc runner.
    """
    # GPU estimates based on the slope of the time vs concurrency, calculated online or offline
-   gpu_estimates: GPUEstimates
+   gpu_estimates: GPUEstimates = Field(default_factory=GPUEstimates)
+
+   # Linear fit results for analysis and debugging
+   fit_results: FitResults = Field(default_factory=FitResults)
 
    # Per-concurrency data (GPU estimates, out-of-range runs, and sizing metrics)
    calc_data: dict[int, CalcData] = {}
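The `Field(default_factory=...)` changes above defer default construction: a plain `Model()` default is built once, when the class body executes at import time, while a factory runs per parent instance. A small pydantic sketch of the timing difference (class names are illustrative):

```python
from pydantic import BaseModel, Field

class Noisy(BaseModel):
    def __init__(self, **data):
        print("Noisy constructed")
        super().__init__(**data)

class Eager(BaseModel):
    child: Noisy = Noisy()  # "Noisy constructed" prints here, at class definition

class Lazy(BaseModel):
    child: Noisy = Field(default_factory=Noisy)  # runs once per Lazy() created

a, b = Lazy(), Lazy()      # two more "Noisy constructed" lines
print(a.child is b.child)  # False: each instance gets an independent default
```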
aiq/runtime/loader.py CHANGED
@@ -175,8 +175,8 @@ def discover_and_register_plugins(plugin_type: PluginTypes):
        # Log a warning if the plugin took a long time to load. This can be useful for debugging slow imports.
        # The threshold is 300 ms if no plugins have been loaded yet, and 100 ms otherwise. Triple the threshold
        # if a debugger is attached.
-       if (elapsed_time > (300.0 if count == 0 else 100.0) * (3 if is_debugger_attached() else 1)):
-           logger.warning(
+       if (elapsed_time > (300.0 if count == 0 else 150.0) * (3 if is_debugger_attached() else 1)):
+           logger.debug(
                "Loading module '%s' from entry point '%s' took a long time (%f ms). "
                "Ensure all imports are inside your registered functions.",
                entry_point.module,
aiq/tool/server_tools.py CHANGED
@@ -63,4 +63,4 @@ async def current_request_attributes(config: RequestAttributesTool, builder: Bui
                       f"Conversation Id: {conversation_id}")
 
    yield FunctionInfo.from_fn(_get_request_attributes,
-                              description="Returns the acquired user defined request attriubutes.")
+                              description="Returns the acquired user defined request attributes.")
aiq/utils/type_converter.py CHANGED
@@ -35,9 +35,12 @@ class TypeConverter:
 
    def __init__(self, converters: list[Callable[[typing.Any], typing.Any]], parent: "TypeConverter | None" = None):
        """
-       :param converters: A list of single-argument converter callables
-           annotated with their input param and return type.
-       :param parent: An optional parent TypeConverter for fallback.
+       Parameters
+       ----------
+       converters : list[Callable[[typing.Any], typing.Any]]
+           A list of single-argument converter callables annotated with their input param and return type.
+       parent : TypeConverter | None
+           An optional parent TypeConverter for fallback.
        """
        # dict[to_type, dict[from_type, converter]]
        self._converters: OrderedDict[type, OrderedDict[type, Callable]] = OrderedDict()
@@ -54,6 +57,16 @@ class TypeConverter:
        """
        Registers a converter. Must have exactly one parameter
        and an annotated return type.
+
+       Parameters
+       ----------
+       converter : Callable
+           A converter function. Must have exactly one parameter and an annotated return type.
+
+       Raises
+       ------
+       ValueError
+           If the converter does not have a return type or exactly one argument or the argument has no data type.
        """
        sig = typing.get_type_hints(converter)
        to_type = sig.pop("return", None)
@@ -70,7 +83,7 @@ class TypeConverter:
        self._converters.setdefault(to_type, OrderedDict())[from_type] = converter
        # to do(MDD): If needed, sort by specificity here.
 
-   def _convert(self, data, to_type: type[_T]) -> _T | None:
+   def _convert(self, data: typing.Any, to_type: type[_T]) -> _T | None:
        """
        Attempts to convert `data` into `to_type`. Returns None if no path is found.
        """
@@ -95,10 +108,27 @@ class TypeConverter:
        # 4) If we still haven't succeeded, return None
        return None
 
-   def convert(self, data, to_type: type[_T]) -> _T:
+   def convert(self, data: typing.Any, to_type: type[_T]) -> _T:
        """
-       Converts or raises ValueError if no path is found.
+       Converts or raises ValueError if no conversion path is found.
        We also give the parent a chance if self fails.
+
+       Parameters
+       ----------
+       data : typing.Any
+           The value to convert.
+       to_type : type
+           The type to convert the value to.
+
+       Returns
+       -------
+       _T
+           The converted value.
+
+       Raises
+       ------
+       ValueError
+           If the value cannot be converted to the specified type.
        """
        result = self._convert(data, to_type)
        if result is None and self._parent:
@@ -109,10 +139,22 @@ class TypeConverter:
            return result
        raise ValueError(f"Cannot convert type {type(data)} to {to_type}. No match found.")
 
-   def try_convert(self, data, to_type: type[_T]) -> _T:
+   def try_convert(self, data: typing.Any, to_type: type[_T]) -> _T | typing.Any:
        """
        Converts with graceful error handling. If conversion fails, returns the original data
        and continues processing.
+
+       Parameters
+       ----------
+       data : typing.Any
+           The value to convert.
+       to_type : type
+           The type to convert the value to.
+
+       Returns
+       -------
+       _T | typing.Any
+           The converted value, or original value if conversion fails.
        """
        try:
            return self.convert(data, to_type)
@@ -124,7 +166,7 @@ class TypeConverter:
    # -------------------------------------------------
    # INTERNAL DIRECT CONVERSION (with parent fallback)
    # -------------------------------------------------
-   def _try_direct_conversion(self, data, target_root_type: type) -> typing.Any | None:
+   def _try_direct_conversion(self, data: typing.Any, target_root_type: type) -> typing.Any | None:
        """
        Tries direct conversion in *this* converter's registry.
        If no match here, we forward to parent's direct conversion
@@ -149,7 +191,7 @@ class TypeConverter:
    # -------------------------------------------------
    # INTERNAL INDIRECT CONVERSION (with parent fallback)
    # -------------------------------------------------
-   def _try_indirect_convert(self, data, to_type: type[_T]) -> _T | None:
+   def _try_indirect_convert(self, data: typing.Any, to_type: type[_T]) -> _T | None:
        """
        Attempt indirect conversion (DFS) in *this* converter.
        If no success, fallback to parent's indirect attempt.
@@ -234,7 +276,7 @@ class GlobalTypeConverter:
        return GlobalTypeConverter._global_converter.convert(data, to_type)
 
    @staticmethod
-   def try_convert(data, to_type: type[_T]) -> _T:
+   def try_convert(data: typing.Any, to_type: type[_T]) -> _T | typing.Any:
        return GlobalTypeConverter._global_converter.try_convert(data, to_type)
 
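A usage sketch based only on the signatures visible in this diff: TypeConverter takes single-argument callables annotated with their input and return types, `convert` raises ValueError when no path exists, and `try_convert` falls back to the original value (requires aiqtoolkit installed):

```python
from aiq.utils.type_converter import TypeConverter

def int_to_str(value: int) -> str:
    # The annotations register this as an int -> str conversion path.
    return str(value)

converter = TypeConverter([int_to_str])
print(converter.convert(42, str))                # "42" via the registered path
fallback = converter.try_convert(object(), str)  # no path: original value back
print(type(fallback))                            # <class 'object'>
try:
    converter.convert(object(), str)             # strict variant raises
except ValueError as e:
    print("raised:", e)
```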
{aiqtoolkit-1.2.0rc2.dist-info → aiqtoolkit-1.2.0rc3.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aiqtoolkit
-Version: 1.2.0rc2
+Version: 1.2.0rc3
 Summary: NVIDIA Agent Intelligence toolkit
 Author: NVIDIA Corporation
 Maintainer: NVIDIA Corporation
{aiqtoolkit-1.2.0rc2.dist-info → aiqtoolkit-1.2.0rc3.dist-info}/RECORD CHANGED
@@ -1,21 +1,21 @@
 aiq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-aiq/agent/base.py,sha256=ZUOfDRBgZD0eUGnlvAbmg-qDwqnUInQ6_ZXXgIWxCAw,9098
+aiq/agent/base.py,sha256=vKD3q6RiRdXYlmsZOsB9DB0cVfSMNByhjn7uwp8TxGw,9094
 aiq/agent/dual_node.py,sha256=EOYpYzhaY-m1t2W3eiQrBjSfNjYMDttAwtzEEEcYP4s,2353
 aiq/agent/register.py,sha256=EATlFFl7ov5HNGySLcPv1T7jzV-Jy-jPVkUzSXDT-7s,1005
 aiq/agent/react_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-aiq/agent/react_agent/agent.py,sha256=Ljm93VqDRsWSIk4n7d11DOuemcNtlp12m8nglEUeyGc,19465
+aiq/agent/react_agent/agent.py,sha256=w3IkPuU13JElSmmGMbYBo3YZF4LrDXvPHbEZ_ZL-L8s,19424
 aiq/agent/react_agent/output_parser.py,sha256=m7K6wRwtckBBpAHqOf3BZ9mqZLwrP13Kxz5fvNxbyZE,4219
 aiq/agent/react_agent/prompt.py,sha256=iGPBU6kh1xbp4QsU1p3o4A0JDov23J1EVM3HSAX6S0A,1713
-aiq/agent/react_agent/register.py,sha256=O0IYtUQ_TVtmaBleYKdpthkspX2fAhB0l8DGYLoE6F0,8128
+aiq/agent/react_agent/register.py,sha256=Qz7KO6tZKMB9TGmy-eJzAPmsJEbEl2lu7trXI4a2CmY,8132
 aiq/agent/reasoning_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-aiq/agent/reasoning_agent/reasoning_agent.py,sha256=1grbjv8c3neFgcTAl8qYeyPJ3B6gcEqkR6I68LOFPT8,9453
+aiq/agent/reasoning_agent/reasoning_agent.py,sha256=lutxHz3T0HUiFyuQfWmSg-MVRw2YTKQJrYCABtHV6cs,9458
 aiq/agent/rewoo_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aiq/agent/rewoo_agent/agent.py,sha256=C8zUpbEFJ0De1tzMNvpxY66Qll2ekjAqSKCIcL0ftLA,19009
 aiq/agent/rewoo_agent/prompt.py,sha256=2XsuI-db_qmH02ypx_IDvi6jTak15cqt_4pZkUv9TFk,3929
 aiq/agent/rewoo_agent/register.py,sha256=krm0dUqM5RZpojiLZRpTgAsE0KGqa2NS2QCtlG70EJE,8128
 aiq/agent/tool_calling_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aiq/agent/tool_calling_agent/agent.py,sha256=e6VOeBnX_cHXUbnGW8N-ByRtHg4GrbsJecFhA9w0Ksk,5718
-aiq/agent/tool_calling_agent/register.py,sha256=kqcN2uovVBQxrIx5MszBS65opbhBrCRlAw00TlG2i30,5408
+aiq/agent/tool_calling_agent/register.py,sha256=Vf_7tOYYDwotxPoPWMuMMr4ZJrPLeLjqBEQ-qVsHbzU,5413
 aiq/authentication/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
 aiq/authentication/interfaces.py,sha256=jfsbVx0MTrxZonyTEH0YFcSJoyFHVkfVHFhm9v3jMrY,3317
 aiq/authentication/register.py,sha256=dYChd2HRv-uv4m8Ebo2sLfbVnksT8D3jEWA-QT-MzX0,947
@@ -44,8 +44,8 @@ aiq/builder/eval_builder.py,sha256=P2yqhPkjvkUi_ZwEsBNJi_qTSmHjraH0_9LXPGnLR7s,6
 aiq/builder/evaluator.py,sha256=O6Gu0cUwQkrPxPX29Vf_-RopgijxPnhy7mhg_j-9A84,1162
 aiq/builder/framework_enum.py,sha256=eYwHQifZ86dx-OTubVA3qhCLRqhB4ElMBYBGA0gYtic,885
 aiq/builder/front_end.py,sha256=Xhvfi4VcDh5EoCtLr6AlLQfbRm8_TyugUc_IRfirN6Y,2225
-aiq/builder/function.py,sha256=lDcMcAhtwYmFihpG3iXb6rkb0goKcJx5H7_WqNJhoRg,12775
-aiq/builder/function_base.py,sha256=lzAFQtpreDx-XVPzInIzKwizpx8rVyI_hDz1m49ueSY,13113
+aiq/builder/function.py,sha256=ox6wkmSk9ZlwJqAogxCe5rlWr7ZjwlyK2P_iGRJtOlY,13179
+aiq/builder/function_base.py,sha256=xj956YMIySsppijaRkL0GBL0NICtiKqCt4clPFSYJKE,13280
 aiq/builder/function_info.py,sha256=pGPIAL0tjVqLOJymIRB0boI9pzJGdXiPK3KiZvXQsqM,25266
 aiq/builder/intermediate_step_manager.py,sha256=sENuXYQKOwnlEZ7f4lKZ63TTVa6FraT8s_ZM9-vwqa4,7614
 aiq/builder/llm.py,sha256=DcoYCyschsRjkW_yGsa_Ci7ELSpk5KRbi9778Dm_B9c,951
@@ -63,8 +63,8 @@ aiq/cli/cli_utils/config_override.py,sha256=WuX9ki1W0Z6jTqjm553U_owWFxbVzjbKRWGJ
 aiq/cli/cli_utils/validation.py,sha256=GlKpoi3HfE5HELjmz5wk8ezGbb5iZeY0zmA3uxmCrBU,1302
 aiq/cli/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aiq/cli/commands/evaluate.py,sha256=_pqAuvrNKBf0DvGpZFO28vAKBWp6izMpaLbAVnP57_4,4783
-aiq/cli/commands/start.py,sha256=8mnJYSToQ1XuYuRBRSonuOsjSL1wqSm8nwj3tH9cnAM,10097
-aiq/cli/commands/uninstall.py,sha256=tTb5WsyRPPXo511yAGSvSG7U19swbQs8Cf_B4rh7mxQ,3248
+aiq/cli/commands/start.py,sha256=zQJdSW9xQbK1oahM-mWaIXm-B2E25n2SZFNv6bSkQ6Y,9838
+aiq/cli/commands/uninstall.py,sha256=D3PGizMGy-c3DLQ-HBcVYPOv0X8eyxFFw27jJW4kxig,3195
 aiq/cli/commands/validate.py,sha256=YfYNRK7m5te_jkKp1klhGp4PdUVCuDyVG4LRW1YXZk0,1669
 aiq/cli/commands/configure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aiq/cli/commands/configure/configure.py,sha256=b_rnfThV161rDmuRO0Jbke-10oSn4RJj2q4IdTvL3ng,1107
@@ -85,7 +85,7 @@ aiq/cli/commands/registry/registry.py,sha256=oo-dPYoKqmGc9lqw3FN9ADg6mGMHwfM14hX
 aiq/cli/commands/registry/remove.py,sha256=ysyfA38NKFZpktjKqDkVK54e9AakpAmBGym2OF6xSuo,3915
 aiq/cli/commands/registry/search.py,sha256=1AhW5Q5GL8SsR6fKgG0rbsd2E0nodm-ehYZwqX2SGgA,5125
 aiq/cli/commands/sizing/__init__.py,sha256=GUJrgGtpvyMUCjUBvR3faAdv-tZzbU9W-izgx9aMEQg,680
-aiq/cli/commands/sizing/calc.py,sha256=jJq4BHrdtaiPK9P7f-AblvEouq1fbFZxnGn3GGtWsHY,11007
+aiq/cli/commands/sizing/calc.py,sha256=vAcDknEG8tbmfJV7GcsdbPePOvb-Z8fIZUeW5Q5dl2w,11150
 aiq/cli/commands/sizing/sizing.py,sha256=-Hr9mz_ScEMtBbn6ijvmmWVk0WybLeX0Ryi4qhDiYQU,902
 aiq/cli/commands/workflow/__init__.py,sha256=GUJrgGtpvyMUCjUBvR3faAdv-tZzbU9W-izgx9aMEQg,680
 aiq/cli/commands/workflow/workflow.py,sha256=gzd_UXIMGq_NBRZiS_WHh3Dimuw9aTdncREVh8KItJI,1342
@@ -96,7 +96,7 @@ aiq/cli/commands/workflow/templates/pyproject.toml.j2,sha256=tDV7-vbt8Of82OEdSOi
 aiq/cli/commands/workflow/templates/register.py.j2,sha256=SlOFmIZakPDu_E6DbIhUZ3yP8KhTrAQCFGBuhy9Fyg4,170
 aiq/cli/commands/workflow/templates/workflow.py.j2,sha256=NRp0MP8GtZByk7lrHp2Y5_6iEopRK2Wyrt0v0_2qQeo,1226
 aiq/data_models/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
-aiq/data_models/api_server.py,sha256=Jx-IZb_lnddjeQxkIBPYSosoEdZYvRWmkVURAot-FbM,25404
+aiq/data_models/api_server.py,sha256=dHlRq_jI8N6dFHiKVJ_y4mMOHKkKP9TPM_A6oc0GSCQ,24969
 aiq/data_models/authentication.py,sha256=TShP47-z9vIrkV3S4sadm0QF7YUkEWtsToKSBiHS3NI,7349
 aiq/data_models/common.py,sha256=y_8AiWmTEaMjCMayVaFYddhv2AAou8Pr84isHgGxeUg,5874
 aiq/data_models/component.py,sha256=_HeHlDmz2fgJlVfmz0tG_yCyes86hQNaQwSWz1FDQvk,1737
@@ -208,7 +208,7 @@ aiq/front_ends/register.py,sha256=OKv1xi-g8WHtUMuIPhwjG6wOYqaGDD-Q9vDtKtT9d1Y,88
 aiq/front_ends/console/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
 aiq/front_ends/console/authentication_flow_handler.py,sha256=pKgaAnBfr3Zc5V3yPiyU-oVZyCC-dNwjvhLn1gSxnUE,9566
 aiq/front_ends/console/console_front_end_config.py,sha256=vI81IK9GJll0t9P-3yb1JcUda5PLfdvyKc-636ZSqYU,1327
-aiq/front_ends/console/console_front_end_plugin.py,sha256=3qOLXKI6JCE33oh2MylrxMm_EonZlUCGjxZrPvTJ_4k,4535
+aiq/front_ends/console/console_front_end_plugin.py,sha256=WXrB5ugakwsoGxbhXJPJjyb5Evoe-eDHBPOnJdtWO9A,3937
 aiq/front_ends/console/register.py,sha256=a84M0jWUFTgOQVyrUiS7UJcxx84i1zhCb1yRkjhapiQ,1159
 aiq/front_ends/cron/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
 aiq/front_ends/fastapi/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
@@ -235,7 +235,7 @@ aiq/front_ends/mcp/mcp_front_end_plugin.py,sha256=XXyu5A5zWLH-eZgLDGcdCELXkwC1YP
 aiq/front_ends/mcp/register.py,sha256=9u5AJVH5UXiniP9bmsOjpfWVzQLHFlUPfL5HZ10Qwnw,1179
 aiq/front_ends/mcp/tool_converter.py,sha256=Pgb06Gtb8MVWeV_dAaI4Tl0Hono_mSuJAHRxTvSONHQ,9840
 aiq/front_ends/simple_base/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
-aiq/front_ends/simple_base/simple_front_end_plugin_base.py,sha256=HzcJFHZUZFnZJdw4YcRTG6nQbTDoBQSgKO6vwifSomk,1684
+aiq/front_ends/simple_base/simple_front_end_plugin_base.py,sha256=HFGa3qJXEK_og4lwXid_YGArlm1lz0GOZ89dMWn152E,1774
 aiq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aiq/llm/aws_bedrock_llm.py,sha256=FFlwS7xZYSTRuPWU_vxvTbmPTbuZpN0ByVxYukEThM4,2836
 aiq/llm/nim_llm.py,sha256=_9WwR4Pt9RCg8RG9oBeYxSt94KcZsMEVoKOFjig_2Zo,2179
@@ -261,9 +261,9 @@ aiq/observability/exporter/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48j
 aiq/observability/exporter/base_exporter.py,sha256=uVr2qssNwJPVJP0P9fD7tNVSdUmj4SWx4GVHqkA1_3s,16639
 aiq/observability/exporter/exporter.py,sha256=QjEtbaTC7LTQpuzdcvRK7gUs6OIlHzQnhzD2pGmXyws,2391
 aiq/observability/exporter/file_exporter.py,sha256=Z1DhUSpTu1SmN282pPRTU20QJZox49jZ75ARsUkgZAk,1496
-aiq/observability/exporter/processing_exporter.py,sha256=4X-LIiJFWryo8ZRrgmz2A5X-JyU3EbVirT6y8rWL9Ck,12310
+aiq/observability/exporter/processing_exporter.py,sha256=wLQ7luy5pZD_uVeAI_JDS92MJhyLDSmoOWeSNL_G3uI,14493
 aiq/observability/exporter/raw_exporter.py,sha256=9MFukCkJE7qHBWf2V2rnsCdzL1HRYRarcudveTsvS8w,1862
-aiq/observability/exporter/span_exporter.py,sha256=9KHedqEAZnPyoPNbVLb4yGsT_2LCYPSCbh3XLCoN9NA,12062
+aiq/observability/exporter/span_exporter.py,sha256=AxEAFeEN3oOs-5ljveDWWCYuJDhVeNXzqyIDS5Gx6Rc,12095
 aiq/observability/mixin/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
 aiq/observability/mixin/batch_config_mixin.py,sha256=DixQq-jRhBFJvpOX-gq7GvPmZCPOXQdacylyEuhZ6y0,1399
 aiq/observability/mixin/collector_config_mixin.py,sha256=3iptkRH9N6JgcsPq7GyjjJVAoxjd-l42UKE7iSF4Hq8,1087
@@ -273,9 +273,10 @@ aiq/observability/mixin/resource_conflict_mixin.py,sha256=mcUp3Qinmhiepq3DyRvp9I
 aiq/observability/mixin/serialize_mixin.py,sha256=DgRHJpXCz9qHFYzhlTTx8Dkj297EylCKK3ydGrH5zOw,2478
 aiq/observability/mixin/type_introspection_mixin.py,sha256=VCb68SY_hitWrWLaK2UHQLkjn2jsgxSn9593T2N3zC0,6637
 aiq/observability/processor/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
-aiq/observability/processor/batching_processor.py,sha256=unPc4bZcBgLzMTPVp2x9ezXOPsrZj9LZvrMDsld26KU,13441
+aiq/observability/processor/batching_processor.py,sha256=dE5E_0oHwISXfCxGXpOJ5fEM-YKIfuPz5h9j9gpJoMQ,13857
+aiq/observability/processor/callback_processor.py,sha256=U1IhQq5x5H6FaoqWCjZZqOFb5RcTdd4ORoQZCHgkdC8,1547
 aiq/observability/processor/intermediate_step_serializer.py,sha256=UQgcHauq568TqVmF_FrInsE5Q_Piryrf_fDnuJbRtAc,1249
-aiq/observability/processor/processor.py,sha256=WY-olVcVe2VNB9iopxVIj4xwVODlJ8UcHKqPBh7UqsE,2526
+aiq/observability/processor/processor.py,sha256=kfYe1EiQZkOPSnqIwQ6Rjz44j-EpreRAe2uIspmHK9w,2593
 aiq/observability/utils/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
280
281
  aiq/observability/utils/dict_utils.py,sha256=DcNhZ0mgcJ-QQfsCl9QSGL-m_jTuHhr1N-v43ZCAMik,7371
281
282
  aiq/observability/utils/time_utils.py,sha256=V8m-e3ldUgwv031B17y29yLXIowdlTH4QW8xDw9WKvk,1071
@@ -288,9 +289,9 @@ aiq/profiler/intermediate_property_adapter.py,sha256=XZ_A8f2S5M-EJSkErY6I750Y8HA
288
289
  aiq/profiler/profile_runner.py,sha256=aNwzcgw9udNh_rgTjtPCScAfvn7ug63LVzW13AvhWRY,22363
289
290
  aiq/profiler/utils.py,sha256=hNh_JfxXDrACIp4usXtlriTfVuYUkk3Pv-x74K34MQg,8180
290
291
  aiq/profiler/calc/__init__.py,sha256=GUJrgGtpvyMUCjUBvR3faAdv-tZzbU9W-izgx9aMEQg,680
291
- aiq/profiler/calc/calc_runner.py,sha256=r65olaY-AjuE0lCwlC7Ob9MJ3opopYzGuTqDwx5UOd4,30379
292
+ aiq/profiler/calc/calc_runner.py,sha256=r06RJi7bWGfI39lmG6qjS-l9ZkWm_r7AbzBRNYuVc9w,30681
292
293
  aiq/profiler/calc/calculations.py,sha256=GUDceQOs52QMmHg0u6BVgq_cmFKjXLbuQdNyd7UVZ0A,12327
293
- aiq/profiler/calc/data_models.py,sha256=Wq3REnYWiJR5EWMH-ZzouJRo2NUi78DfyKevNmXKqX8,5662
294
+ aiq/profiler/calc/data_models.py,sha256=vmBu89C1LsJyIRpEEzGi29_cFJDZJUJXOSXWtraVfd8,6172
294
295
  aiq/profiler/calc/plot.py,sha256=GW1cK8U2w6hIzKp6ItciCmxHR0KeUCw7n5EvpcPsoio,12222
295
296
  aiq/profiler/callbacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
296
297
  aiq/profiler/callbacks/agno_callback_handler.py,sha256=aDAUY6GDIUtly6KowXXKUqLc7NbE6khg1aXT1AritaA,14930
@@ -354,7 +355,7 @@ aiq/retriever/nemo_retriever/__init__.py,sha256=GUJrgGtpvyMUCjUBvR3faAdv-tZzbU9W
354
355
  aiq/retriever/nemo_retriever/register.py,sha256=ODV-TZfXzDs1VJHHLdj2kC05odirtlQZSeh9c1zw8AQ,2893
355
356
  aiq/retriever/nemo_retriever/retriever.py,sha256=IvScUr9XuDLiMR__I3QsboLaM52N5D5Qu94qtTOGQw8,6958
356
357
  aiq/runtime/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
357
- aiq/runtime/loader.py,sha256=CqZzeIpN_aWUEASr4JBtQjmVtSv2DNpu6dFha_4iRI8,7119
358
+ aiq/runtime/loader.py,sha256=drLE-OnrAPjFgWrQ3ZsnhHSbon8GkRsUk0p7Oe8Jn2I,7117
358
359
  aiq/runtime/runner.py,sha256=CqmlVAYfrBh3ml3t2n3V693RaNyxtK9ScWT4S-Isbr8,6365
359
360
  aiq/runtime/session.py,sha256=i1pIqopZCBgGJqVUskKLiBnZYH-lTdMhvFu56dXAU5A,6206
360
361
  aiq/runtime/user_metadata.py,sha256=9EiBc-EEJzOdpf3Q1obHqAdY_kRlJ1T0TVvY0Jonk6o,3692
@@ -368,7 +369,7 @@ aiq/tool/document_search.py,sha256=w3D3r5ZBS2jYmVAZZ7lC7xCoi25bA1RePoFjjlV1Zog,6
368
369
  aiq/tool/nvidia_rag.py,sha256=9mS3igONo1RywxXNj_ITh2-qD91x1R0f7uhOWMZQX3o,4178
369
370
  aiq/tool/register.py,sha256=Lwl6l_eEzS8LAELxClmniNhhLluRVZFYXhsk2ocQhNg,1491
370
371
  aiq/tool/retriever.py,sha256=DnuU4khpJkd4epDBGQsowDOqDBKFiLQrnyKXgU6IRW8,3724
371
- aiq/tool/server_tools.py,sha256=7wnB-bfFb2lcGSJuG4Wu9PL_N4cDYRu29zFXxIIa9-8,3201
372
+ aiq/tool/server_tools.py,sha256=286hTIvX_8pEohmkqmgWTCLkTfMTjuR9v0zivcW17r4,3200
372
373
  aiq/tool/code_execution/README.md,sha256=GQy3pPOVspFHQdclQCCweIAY2r8rVZ6bXyCY2FcEc6M,4090
373
374
  aiq/tool/code_execution/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
374
375
  aiq/tool/code_execution/code_sandbox.py,sha256=ibe6BoYdWzmQWHdVNweVP-QW1WRGirfhvFr0iqm4fI8,10159
@@ -404,7 +405,7 @@ aiq/utils/metadata_utils.py,sha256=lGYvc8Gk0az4qZDGeRbVz4L7B_b-Gnjss8JT4goqL5I,2
404
405
  aiq/utils/optional_imports.py,sha256=jQSVBc2fBSRw-2d6r8cEwvh5-di2EUUPakuuo9QbbwA,4039
405
406
  aiq/utils/producer_consumer_queue.py,sha256=AcSYkAMBxLx06A5Xdy960PP3AJ7YaSPGJ7rbN_hJsjI,6599
406
407
  aiq/utils/string_utils.py,sha256=71HuIzGx7rF8ocTmeoUBpnCi1Qf1yynYlNLLIKP4BVs,1415
407
- aiq/utils/type_converter.py,sha256=w711BBOwzhtJo2si722C0wPtACFwZvOaKZQDwScAunY,9461
408
+ aiq/utils/type_converter.py,sha256=mu09lnTOrsYee1gQlc1zLoDL7rJhzN7VNhK6viIS5gA,10640
408
409
  aiq/utils/type_utils.py,sha256=G-SYlk4BkJv4L225myUhwBY6-Z7wOgR9K49dVUtV8SA,14896
409
410
  aiq/utils/url_utils.py,sha256=UzDP_xaS6brWTu7vAws0B4jZyrITIK9Si3U6pZBZqDE,1028
410
411
  aiq/utils/data_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -427,10 +428,10 @@ aiq/utils/reactive/base/observer_base.py,sha256=UAlyAY_ky4q2t0P81RVFo2Bs_R7z5Nde
427
428
  aiq/utils/reactive/base/subject_base.py,sha256=Ed-AC6P7cT3qkW1EXjzbd5M9WpVoeN_9KCe3OM3FLU4,2521
428
429
  aiq/utils/settings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
429
430
  aiq/utils/settings/global_settings.py,sha256=U9TCLdoZsKq5qOVGjREipGVv9e-FlStzqy5zv82_VYk,7454
430
- aiqtoolkit-1.2.0rc2.dist-info/licenses/LICENSE-3rd-party.txt,sha256=8o7aySJa9CBvFshPcsRdJbczzdNyDGJ8b0J67WRUQ2k,183936
431
- aiqtoolkit-1.2.0rc2.dist-info/licenses/LICENSE.md,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
432
- aiqtoolkit-1.2.0rc2.dist-info/METADATA,sha256=SZwCOh4Pv7hbJMosDBuaGY4svIioO4aeivAQR0ifrN0,21558
433
- aiqtoolkit-1.2.0rc2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
434
- aiqtoolkit-1.2.0rc2.dist-info/entry_points.txt,sha256=iZR3yrf1liXfbcLqn5_pUkLhZyr1bUw_Qh1d2i7gsv4,625
435
- aiqtoolkit-1.2.0rc2.dist-info/top_level.txt,sha256=fo7AzYcNhZ_tRWrhGumtxwnxMew4xrT1iwouDy_f0Kc,4
436
- aiqtoolkit-1.2.0rc2.dist-info/RECORD,,
431
+ aiqtoolkit-1.2.0rc3.dist-info/licenses/LICENSE-3rd-party.txt,sha256=8o7aySJa9CBvFshPcsRdJbczzdNyDGJ8b0J67WRUQ2k,183936
432
+ aiqtoolkit-1.2.0rc3.dist-info/licenses/LICENSE.md,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
433
+ aiqtoolkit-1.2.0rc3.dist-info/METADATA,sha256=X2SwqPxRdjMtrQ0l2Q9Gg439pHoduJja350zK9gCLc0,21558
434
+ aiqtoolkit-1.2.0rc3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
435
+ aiqtoolkit-1.2.0rc3.dist-info/entry_points.txt,sha256=iZR3yrf1liXfbcLqn5_pUkLhZyr1bUw_Qh1d2i7gsv4,625
436
+ aiqtoolkit-1.2.0rc3.dist-info/top_level.txt,sha256=fo7AzYcNhZ_tRWrhGumtxwnxMew4xrT1iwouDy_f0Kc,4
437
+ aiqtoolkit-1.2.0rc3.dist-info/RECORD,,