aiqtoolkit 1.2.0a20250613__py3-none-any.whl → 1.2.0a20250615__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.

Potentially problematic release.

@@ -38,7 +38,7 @@ class ToolCallAgentWorkflowConfig(FunctionBaseConfig, name="tool_calling_agent")
      tool_names: list[FunctionRef] = Field(default_factory=list,
                                            description="The list of tools to provide to the tool calling agent.")
      llm_name: LLMRef = Field(description="The LLM model to use with the tool calling agent.")
-     verbose: bool = Field(default=False, description="Set the verbosity of the react agent's logging.")
+     verbose: bool = Field(default=False, description="Set the verbosity of the tool calling agent's logging.")
      handle_tool_errors: bool = Field(default=True, description="Specify ability to handle tool calling errors.")
      description: str = Field(default="Tool Calling Agent Workflow", description="Description of this functions use.")
      max_iterations: int = Field(default=15, description="Number of tool calls before stoping the tool calling agent.")
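
For orientation, ToolCallAgentWorkflowConfig is a plain Pydantic config, so the fields above can be set directly when wiring up a workflow. A minimal sketch, assuming the ref-typed fields (FunctionRef, LLMRef) validate from plain strings; the tool and LLM names here are made up for illustration and are not part of the package:

from aiq.agent.tool_calling_agent.register import ToolCallAgentWorkflowConfig

# Illustrative only: unset fields keep the defaults declared in the class above.
config = ToolCallAgentWorkflowConfig(
    tool_names=["my_search_tool"],  # hypothetical tool name
    llm_name="my_llm",              # hypothetical LLM name
    verbose=True,                   # verbosity of the tool calling agent's logging
    max_iterations=10,              # stop after 10 tool calls instead of the default 15
)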
@@ -13,17 +13,23 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
 
+ import asyncio
  import logging
+ from typing import Callable
 
  from langchain.output_parsers import ResponseSchema
  from langchain.output_parsers import StructuredOutputParser
  from langchain.schema import HumanMessage
  from langchain.schema import SystemMessage
  from langchain_core.language_models import BaseChatModel
+ from langchain_core.runnables import RunnableLambda
+ from tqdm import tqdm
 
- from aiq.eval.evaluator.base_evaluator import BaseEvaluator
+ from aiq.eval.evaluator.evaluator_model import EvalInput
  from aiq.eval.evaluator.evaluator_model import EvalInputItem
+ from aiq.eval.evaluator.evaluator_model import EvalOutput
  from aiq.eval.evaluator.evaluator_model import EvalOutputItem
+ from aiq.eval.utils.tqdm_position_registry import TqdmPositionRegistry
 
  logger = logging.getLogger(__name__)
 
@@ -65,150 +71,222 @@ def evaluation_prompt(judge_llm_prompt: str,
      return EVAL_PROMPT if not default_scoring else DEFAULT_EVAL_PROMPT
 
 
- class TunableRagEvaluator(BaseEvaluator):
+ def runnable_with_retries(original_fn: Callable, llm_retry_control_params: dict | None = None):
+     runnable = RunnableLambda(original_fn)
+
+     if llm_retry_control_params is None:
+         llm_retry_control_params = {
+             "stop_after_attempt": 3, "initial_backoff_delay_seconds": 1, "has_exponential_jitter": True
+         }
+
+     if llm_retry_control_params["has_exponential_jitter"] is None:
+         llm_retry_control_params["has_exponential_jitter"] = True
+     if llm_retry_control_params["stop_after_attempt"] is None:
+         llm_retry_control_params["stop_after_attempt"] = 3
+     if llm_retry_control_params["initial_backoff_delay_seconds"] is None:
+         llm_retry_control_params["initial_backoff_delay_seconds"] = 1
+
+     # Add retry logic with exponential backoff and jitter
+     return runnable.with_retry(
+         retry_if_exception_type=(Exception, ),  # Retry on any error
+         wait_exponential_jitter=llm_retry_control_params["has_exponential_jitter"],  # Add jitter to exponential backoff
+         stop_after_attempt=llm_retry_control_params["stop_after_attempt"],
+         exponential_jitter_params={"initial": llm_retry_control_params["initial_backoff_delay_seconds"]
+                                    }  # Optional: set initial backoff (seconds)
+     )
+
+
+ class TunableRagEvaluator:
      '''Tunable RAG evaluator class with customizable LLM prompt for scoring.'''
 
      def __init__(self,
                   llm: BaseChatModel,
                   judge_llm_prompt: str,
+                  llm_retry_control_params: dict | None,
                   max_concurrency: int,
                   default_scoring: bool,
                   default_score_weights: dict):
-         super().__init__(max_concurrency=max_concurrency, tqdm_desc="Evaluating RAG")
          self.llm = llm
+         self.max_concurrency = max_concurrency
          self.judge_llm_prompt = judge_llm_prompt
+         self.llm_retry_control_params = llm_retry_control_params
+         self.semaphore = asyncio.Semaphore(self.max_concurrency)
          self.default_scoring = default_scoring
          # Use user-provided weights if available; otherwise, set equal weights for each score
          self.default_score_weights = default_score_weights if default_score_weights else {
              "coverage": 1 / 3, "correctness": 1 / 3, "relevance": 1 / 3
          }
 
-     async def evaluate_item(self, item: EvalInputItem) -> EvalOutputItem:
-         '''Evaluate a single item'''
-         question = item.input_obj
-         answer_description = item.expected_output_obj
-         generated_answer = item.output_obj
-
-         # Call judge LLM to generate score
-         score = 0.0
-
-         default_evaluation_schema = [
-             ResponseSchema(
-                 name="coverage_score",
-                 description="Score for the coverage of all critical aspects mentioned in the expected answer. Ex. 0.5",
-                 type="float"),
-             ResponseSchema(
-                 name="correctness_score",
-                 description="Score for the accuracy of the generated answer compared to the expected answer. Ex. 0.5",
-                 type="float"),
-             ResponseSchema(name="relevance_score",
-                            description="Score for the relevance of the generated answer to the question. Ex. 0.5",
-                            type="float"),
-             ResponseSchema(
-                 name="reasoning",
-                 description=
-                 "1-2 summarized sentences of reasoning for the scores. Ex. 'The generated answer covers all critical aspects mentioned in the expected answer, is correct, and is relevant to the question.'",
-                 type="string"),
-         ]
+     async def evaluate(self, eval_input: EvalInput) -> EvalOutput:
+         '''Evaluate function'''
 
-         custom_evaluation_schema = [
-             ResponseSchema(name="score", description="Score for the generated answer. Ex. 0.5", type="float"),
-             ResponseSchema(
-                 name="reasoning",
-                 description=
-                 "1-2 sentence reasoning for the score. Ex. 'The generated answer is exactly the same as the description of the expected answer.'",
-                 type="string"),
-         ]
+         async def process_item(item):
+             """Compute RAG evaluation for an individual item"""
+             question = item.input_obj
+             answer_description = item.expected_output_obj
+             generated_answer = item.output_obj
+
+             # Call judge LLM to generate score
+             score = 0.0
+
+             default_evaluation_schema = [
+                 ResponseSchema(
+                     name="coverage_score",
+                     description=
+                     "Score for the coverage of all critical aspects mentioned in the expected answer. Ex. 0.5",
+                     type="float"),
+                 ResponseSchema(
+                     name="correctness_score",
+                     description=
+                     "Score for the accuracy of the generated answer compared to the expected answer. Ex. 0.5",
+                     type="float"),
+                 ResponseSchema(name="relevance_score",
+                                description="Score for the relevance of the generated answer to the question. Ex. 0.5",
+                                type="float"),
+                 ResponseSchema(
+                     name="reasoning",
+                     description=
+                     "1-2 summarized sentences of reasoning for the scores. Ex. 'The generated answer covers all critical aspects mentioned in the expected answer, is correct, and is relevant to the question.'",
+                     type="string"),
+             ]
+
+             custom_evaluation_schema = [
+                 ResponseSchema(name="score", description="Score for the generated answer. Ex. 0.5", type="float"),
+                 ResponseSchema(
+                     name="reasoning",
+                     description=
+                     "1-2 sentence reasoning for the score. Ex. 'The generated answer is exactly the same as the description of the expected answer.'",
+                     type="string"),
+             ]
 
-         if self.default_scoring:
-             evaluation_schema = default_evaluation_schema
-         else:
-             evaluation_schema = custom_evaluation_schema
+             if self.default_scoring:
+                 evaluation_schema = default_evaluation_schema
+             else:
+                 evaluation_schema = custom_evaluation_schema
+
+             llm_input_response_parser = StructuredOutputParser.from_response_schemas(evaluation_schema)
+             format_instructions = llm_input_response_parser.get_format_instructions()
 
-         llm_input_response_parser = StructuredOutputParser.from_response_schemas(evaluation_schema)
-         format_instructions = llm_input_response_parser.get_format_instructions()
+             eval_prompt = evaluation_prompt(judge_llm_prompt=self.judge_llm_prompt,
+                                             question=question,
+                                             answer_description=answer_description,
+                                             generated_answer=generated_answer,
+                                             format_instructions=format_instructions,
+                                             default_scoring=self.default_scoring)
 
-         eval_prompt = evaluation_prompt(judge_llm_prompt=self.judge_llm_prompt,
-                                         question=question,
-                                         answer_description=answer_description,
-                                         generated_answer=generated_answer,
-                                         format_instructions=format_instructions,
-                                         default_scoring=self.default_scoring)
+             messages = [
+                 SystemMessage(content="You must respond only in JSON format."), HumanMessage(content=eval_prompt)
+             ]
 
-         messages = [SystemMessage(content="You must respond only in JSON format."), HumanMessage(content=eval_prompt)]
+             response = await runnable_with_retries(self.llm.ainvoke, self.llm_retry_control_params).ainvoke(messages)
 
-         response = await self.llm.ainvoke(messages)
+             # Initialize default values to handle service errors
+             coverage_score = 0.0
+             correctness_score = 0.0
+             relevance_score = 0.0
+             reasoning = "Error in evaluator from parsing judge LLM response."
 
-         # Initialize default values to handle service errors
-         coverage_score = 0.0
-         correctness_score = 0.0
-         relevance_score = 0.0
-         reasoning = "Error in evaluator from parsing judge LLM response."
+             try:
+                 parsed_response = llm_input_response_parser.parse(response.content)
+                 if self.default_scoring:
+                     try:
+                         coverage_score = parsed_response["coverage_score"]
+                         correctness_score = parsed_response["correctness_score"]
+                         relevance_score = parsed_response["relevance_score"]
+                         reasoning = parsed_response["reasoning"]
+                     except KeyError as e:
+                         logger.error("Missing required keys in default scoring response: %s",
+                                      ", ".join(str(arg) for arg in e.args))
+                         reasoning = f"Error in evaluator from parsing judge LLM response. Missing required key(s): {', '.join(str(arg) for arg in e.args)}"
+
+                     coverage_weight = self.default_score_weights.get("coverage", 1 / 3)
+                     correctness_weight = self.default_score_weights.get("correctness", 1 / 3)
+                     relevance_weight = self.default_score_weights.get("relevance", 1 / 3)
+
+                     # Calculate score
+                     total_weight = coverage_weight + correctness_weight + relevance_weight
+                     coverage_weight = coverage_weight / total_weight
+                     correctness_weight = correctness_weight / total_weight
+                     relevance_weight = relevance_weight / total_weight
+
+                     if round(coverage_weight + correctness_weight + relevance_weight, 2) != 1:
+                         logger.warning("The sum of the default score weights is not 1. The weights will be normalized.")
+                         coverage_weight = coverage_weight / (coverage_weight + correctness_weight + relevance_weight)
+                         correctness_weight = correctness_weight / (coverage_weight + correctness_weight +
+                                                                    relevance_weight)
+                         relevance_weight = relevance_weight / (coverage_weight + correctness_weight + relevance_weight)
+
+                     score = (coverage_weight * coverage_score + correctness_weight * correctness_score +
+                              relevance_weight * relevance_score)
+
+                 else:
+                     try:
+                         score = parsed_response["score"]
+                         reasoning = parsed_response["reasoning"]
+                     except KeyError as e:
+                         logger.error("Missing required keys in custom scoring response: %s",
+                                      ", ".join(str(arg) for arg in e.args))
+                         reasoning = f"Error in evaluator from parsing judge LLM response. Missing required key(s): {', '.join(str(arg) for arg in e.args)}"
+                         raise
+             except (KeyError, ValueError) as e:
+                 logger.error("Error parsing judge LLM response: %s", e)
+                 score = 0.0
+                 reasoning = "Error in evaluator from parsing judge LLM response."
 
-         try:
-             parsed_response = llm_input_response_parser.parse(response.content)
              if self.default_scoring:
-                 try:
-                     coverage_score = parsed_response["coverage_score"]
-                     correctness_score = parsed_response["correctness_score"]
-                     relevance_score = parsed_response["relevance_score"]
-                     reasoning = parsed_response["reasoning"]
-                 except KeyError as e:
-                     logger.error("Missing required keys in default scoring response: %s",
-                                  ", ".join(str(arg) for arg in e.args))
-                     reasoning = f"Error in evaluator from parsing judge LLM response. Missing required key(s): {', '.join(str(arg) for arg in e.args)}"
-
-                 coverage_weight = self.default_score_weights.get("coverage", 1 / 3)
-                 correctness_weight = self.default_score_weights.get("correctness", 1 / 3)
-                 relevance_weight = self.default_score_weights.get("relevance", 1 / 3)
-
-                 # Calculate score
-                 total_weight = coverage_weight + correctness_weight + relevance_weight
-                 coverage_weight = coverage_weight / total_weight
-                 correctness_weight = correctness_weight / total_weight
-                 relevance_weight = relevance_weight / total_weight
-
-                 if round(coverage_weight + correctness_weight + relevance_weight, 2) != 1:
-                     logger.warning("The sum of the default score weights is not 1. The weights will be normalized.")
-                     coverage_weight = coverage_weight / (coverage_weight + correctness_weight + relevance_weight)
-                     correctness_weight = correctness_weight / (coverage_weight + correctness_weight + relevance_weight)
-                     relevance_weight = relevance_weight / (coverage_weight + correctness_weight + relevance_weight)
-
-                 score = (coverage_weight * coverage_score + correctness_weight * correctness_score +
-                          relevance_weight * relevance_score)
-
+                 reasoning = {
+                     "question": question,
+                     "answer_description": answer_description,
+                     "generated_answer": generated_answer,
+                     "score_breakdown": {
+                         "coverage_score": coverage_score,
+                         "correctness_score": correctness_score,
+                         "relevance_score": relevance_score,
+                     },
+                     "reasoning": reasoning,
+                 }
              else:
-                 try:
-                     score = parsed_response["score"]
-                     reasoning = parsed_response["reasoning"]
-                 except KeyError as e:
-                     logger.error("Missing required keys in custom scoring response: %s",
-                                  ", ".join(str(arg) for arg in e.args))
-                     reasoning = f"Error in evaluator from parsing judge LLM response. Missing required key(s): {', '.join(str(arg) for arg in e.args)}"
-                     raise
-         except (KeyError, ValueError) as e:
-             logger.error("Error parsing judge LLM response: %s", e)
-             score = 0.0
-             reasoning = "Error in evaluator from parsing judge LLM response."
+                 reasoning = {
+                     "question": question,
+                     "answer_description": answer_description,
+                     "generated_answer": generated_answer,
+                     "reasoning": reasoning
+                 }
+
+             return score, reasoning
+
+         async def wrapped_process(item: EvalInputItem) -> tuple[float, dict]:
+             """
+             Process an item asynchronously and update the progress bar.
+             Use the semaphore to limit the number of concurrent items.
+             """
+             async with self.semaphore:
+                 result = await process_item(item)
+                 # Update the progress bar
+                 pbar.update(1)
+                 return result
+
+         try:
+             # Claim a tqdm position to display the progress bar
+             tqdm_position = TqdmPositionRegistry.claim()
+             # Create a progress bar
+             pbar = tqdm(total=len(eval_input.eval_input_items), desc="Evaluating RAG", position=tqdm_position)
+             # Process items concurrently with a limit on concurrency
+             results = await asyncio.gather(*[wrapped_process(item) for item in eval_input.eval_input_items])
+         finally:
+             pbar.close()
+             TqdmPositionRegistry.release(tqdm_position)
+
+         # Extract scores and reasonings
+         sample_scores, sample_reasonings = zip(*results) if results else ([], [])
+
+         # Compute average score
+         avg_score = round(sum(sample_scores) / len(sample_scores), 2) if sample_scores else 0.0
+
+         # Construct EvalOutputItems
+         eval_output_items = [
+             EvalOutputItem(id=item.id, score=score, reasoning=reasoning)
+             for item, score, reasoning in zip(eval_input.eval_input_items, sample_scores, sample_reasonings)
+         ]
 
-         if self.default_scoring:
-             reasoning = {
-                 "question": question,
-                 "answer_description": answer_description,
-                 "generated_answer": generated_answer,
-                 "score_breakdown": {
-                     "coverage_score": coverage_score,
-                     "correctness_score": correctness_score,
-                     "relevance_score": relevance_score,
-                 },
-                 "reasoning": reasoning,
-             }
-         else:
-             reasoning = {
-                 "question": question,
-                 "answer_description": answer_description,
-                 "generated_answer": generated_answer,
-                 "reasoning": reasoning
-             }
-
-         return EvalOutputItem(id=item.id, score=score, reasoning=reasoning)
+         return EvalOutput(average_score=avg_score, eval_output_items=eval_output_items)
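
The rewritten evaluate() combines two independent pieces: per-call retries via LangChain's RunnableLambda.with_retry, and semaphore-bounded asyncio.gather with a tqdm progress bar claimed through TqdmPositionRegistry. A minimal, self-contained sketch of the same shape outside the toolkit; flaky_call, the prompts list, and the concurrency limit of 2 are illustrative stand-ins, not package code:

import asyncio
import random

from langchain_core.runnables import RunnableLambda
from tqdm import tqdm


async def flaky_call(prompt: str) -> str:
    """Stand-in for llm.ainvoke: fails sometimes to exercise the retry path."""
    if random.random() < 0.3:
        raise RuntimeError("transient service error")
    return f"echo: {prompt}"


async def main() -> None:
    # Wrap the coroutine in a RunnableLambda and retry with exponential backoff
    # and jitter, mirroring runnable_with_retries above.
    retrying = RunnableLambda(flaky_call).with_retry(
        retry_if_exception_type=(Exception, ),
        wait_exponential_jitter=True,
        stop_after_attempt=3,
    )

    prompts = [f"question {i}" for i in range(8)]
    semaphore = asyncio.Semaphore(2)  # plays the role of max_concurrency
    pbar = tqdm(total=len(prompts), desc="Evaluating RAG")

    async def bounded(prompt: str) -> str:
        # Same shape as wrapped_process: acquire the semaphore, run, tick the bar.
        async with semaphore:
            result = await retrying.ainvoke(prompt)
            pbar.update(1)
            return result

    try:
        results = await asyncio.gather(*[bounded(p) for p in prompts])
    finally:
        pbar.close()

    print(results)


asyncio.run(main())

The real implementation wraps self.llm.ainvoke instead of flaky_call and turns the gathered (score, reasoning) tuples into EvalOutputItem objects and a final EvalOutput with the average score.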
@@ -26,6 +26,7 @@ from aiq.data_models.evaluator import EvaluatorBaseConfig
  class TunableRagEvaluatorConfig(EvaluatorBaseConfig, name="tunable_rag_evaluator"):
      '''Configuration for tunable RAG evaluator'''
      llm_name: LLMRef = Field(description="Name of the judge LLM")
+     llm_retry_control_params: dict | None = Field(description="Parameters to control LLM retry behavior", default=None)
      judge_llm_prompt: str = Field(description="LLM prompt for the judge LLM")
      default_scoring: bool = Field(description="Whether to use default scoring", default=False)
      default_score_weights: dict = Field(
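
The new llm_retry_control_params field feeds directly into runnable_with_retries shown earlier; the recognized keys are stop_after_attempt, initial_backoff_delay_seconds, and has_exponential_jitter. A minimal sketch of constructing the config with explicit retry settings, assuming the remaining fields keep their declared defaults; the LLM name and prompt text are illustrative, not from the package:

from aiq.eval.tunable_rag_evaluator.register import TunableRagEvaluatorConfig

# Illustrative values only; the keys match what runnable_with_retries reads.
retry_params = {
    "stop_after_attempt": 5,             # give up after 5 attempts
    "initial_backoff_delay_seconds": 2,  # first backoff delay in seconds
    "has_exponential_jitter": True,      # jittered exponential backoff
}

config = TunableRagEvaluatorConfig(
    llm_name="judge_llm",  # hypothetical LLM reference
    judge_llm_prompt="Score the generated answer against the expected answer.",
    llm_retry_control_params=retry_params,
)

Note that runnable_with_retries indexes all three keys directly (it only checks for None values), so when the dict is provided it should contain every key, even if some are set to None.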
@@ -43,6 +44,7 @@ async def register_tunable_rag_evaluator(config: TunableRagEvaluatorConfig, buil
      llm = await builder.get_llm(config.llm_name, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
      evaluator = TunableRagEvaluator(llm,
                                      config.judge_llm_prompt,
+                                     config.llm_retry_control_params,
                                      builder.get_max_concurrency(),
                                      config.default_scoring,
                                      config.default_score_weights)
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: aiqtoolkit
- Version: 1.2.0a20250613
+ Version: 1.2.0a20250615
  Summary: NVIDIA Agent Intelligence toolkit
  Author: NVIDIA Corporation
  Maintainer: NVIDIA Corporation
@@ -15,7 +15,7 @@ aiq/agent/rewoo_agent/prompt.py,sha256=2XsuI-db_qmH02ypx_IDvi6jTak15cqt_4pZkUv9T
  aiq/agent/rewoo_agent/register.py,sha256=MRd2s3nOMYlLzr5Rq5wkl6_HJGhT3im09ylzllyOXT8,8120
  aiq/agent/tool_calling_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  aiq/agent/tool_calling_agent/agent.py,sha256=U6PD3_fxfdXe6O4WSGC2Hch-Hz_tqvlbiAkpKj7dgkQ,6034
- aiq/agent/tool_calling_agent/register.py,sha256=qWY1KmDhpG9AIwM3fO5nsF8zE7lWln5qeLa72sFgUpU,5401
+ aiq/agent/tool_calling_agent/register.py,sha256=kqcN2uovVBQxrIx5MszBS65opbhBrCRlAw00TlG2i30,5408
  aiq/builder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  aiq/builder/builder.py,sha256=JE3uCw6qTmWX2akUfS-Ga5WSiJVTzBjDQPniiONMjy4,7448
  aiq/builder/component_utils.py,sha256=2jIXWSLIlKxDKAO7kdXz_4BHqQNWw4t9GmcQfw0ER4g,12923
@@ -129,8 +129,8 @@ aiq/eval/trajectory_evaluator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5N
  aiq/eval/trajectory_evaluator/evaluate.py,sha256=Y51KMhJ9t8AoYWrQlrwipc2CtgIXA9IUGZTbKegtsnw,3257
  aiq/eval/trajectory_evaluator/register.py,sha256=kktT4fu5_1Cou-iohD3YhQevsWiR3TA5NpFSweVz0eQ,1709
  aiq/eval/tunable_rag_evaluator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- aiq/eval/tunable_rag_evaluator/evaluate.py,sha256=lZxQDhvcAu0JR1RApkbs-G3T9pUOSfh822TYGp7vrQw,11440
- aiq/eval/tunable_rag_evaluator/register.py,sha256=uV36xONVxQW8qBO_bsvbvZk4-J4IhowxiRKErnYsbzA,2369
+ aiq/eval/tunable_rag_evaluator/evaluate.py,sha256=xo7gtBI-cOrmk8s6FNLDoMhn2F0ODOxdAtg37i4Vu24,15387
+ aiq/eval/tunable_rag_evaluator/register.py,sha256=q4p2rFyMzWmaINJc961ZV4jzIlAN4GfWsoImHo0ovsY,2558
  aiq/eval/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  aiq/eval/utils/output_uploader.py,sha256=SaQbZPkw-Q0H7t5yG60Kh-p1cflR7gPklVkilC4uPbU,5141
  aiq/eval/utils/tqdm_position_registry.py,sha256=9CtpCk1wtYCSyieHPaSp8nlZu6EcNUOaUz2RTqfekrA,1286
@@ -309,10 +309,10 @@ aiq/utils/reactive/base/observer_base.py,sha256=UAlyAY_ky4q2t0P81RVFo2Bs_R7z5Nde
  aiq/utils/reactive/base/subject_base.py,sha256=Ed-AC6P7cT3qkW1EXjzbd5M9WpVoeN_9KCe3OM3FLU4,2521
  aiq/utils/settings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  aiq/utils/settings/global_settings.py,sha256=U9TCLdoZsKq5qOVGjREipGVv9e-FlStzqy5zv82_VYk,7454
- aiqtoolkit-1.2.0a20250613.dist-info/licenses/LICENSE-3rd-party.txt,sha256=8o7aySJa9CBvFshPcsRdJbczzdNyDGJ8b0J67WRUQ2k,183936
- aiqtoolkit-1.2.0a20250613.dist-info/licenses/LICENSE.md,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
- aiqtoolkit-1.2.0a20250613.dist-info/METADATA,sha256=0LV-fg4UXDznF9C1ojoVD1qrvT1Spoc0w7duaBn_QVI,20274
- aiqtoolkit-1.2.0a20250613.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- aiqtoolkit-1.2.0a20250613.dist-info/entry_points.txt,sha256=gRlPfR5g21t328WNEQ4CcEz80S1sJNS8A7rMDYnzl4A,452
- aiqtoolkit-1.2.0a20250613.dist-info/top_level.txt,sha256=fo7AzYcNhZ_tRWrhGumtxwnxMew4xrT1iwouDy_f0Kc,4
- aiqtoolkit-1.2.0a20250613.dist-info/RECORD,,
+ aiqtoolkit-1.2.0a20250615.dist-info/licenses/LICENSE-3rd-party.txt,sha256=8o7aySJa9CBvFshPcsRdJbczzdNyDGJ8b0J67WRUQ2k,183936
+ aiqtoolkit-1.2.0a20250615.dist-info/licenses/LICENSE.md,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+ aiqtoolkit-1.2.0a20250615.dist-info/METADATA,sha256=YNInACugIcEk12fER5CNZbAzW0DxP8UZJksk73asxK4,20274
+ aiqtoolkit-1.2.0a20250615.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ aiqtoolkit-1.2.0a20250615.dist-info/entry_points.txt,sha256=gRlPfR5g21t328WNEQ4CcEz80S1sJNS8A7rMDYnzl4A,452
+ aiqtoolkit-1.2.0a20250615.dist-info/top_level.txt,sha256=fo7AzYcNhZ_tRWrhGumtxwnxMew4xrT1iwouDy_f0Kc,4
+ aiqtoolkit-1.2.0a20250615.dist-info/RECORD,,