aiqtoolkit 1.1.0a20250501__py3-none-any.whl → 1.1.0a20250502__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiq/data_models/api_server.py +67 -51
- aiq/observability/async_otel_listener.py +144 -0
- {aiqtoolkit-1.1.0a20250501.dist-info → aiqtoolkit-1.1.0a20250502.dist-info}/METADATA +3 -1
- {aiqtoolkit-1.1.0a20250501.dist-info → aiqtoolkit-1.1.0a20250502.dist-info}/RECORD +9 -9
- {aiqtoolkit-1.1.0a20250501.dist-info → aiqtoolkit-1.1.0a20250502.dist-info}/WHEEL +0 -0
- {aiqtoolkit-1.1.0a20250501.dist-info → aiqtoolkit-1.1.0a20250502.dist-info}/entry_points.txt +0 -0
- {aiqtoolkit-1.1.0a20250501.dist-info → aiqtoolkit-1.1.0a20250502.dist-info}/licenses/LICENSE-3rd-party.txt +0 -0
- {aiqtoolkit-1.1.0a20250501.dist-info → aiqtoolkit-1.1.0a20250502.dist-info}/licenses/LICENSE.md +0 -0
- {aiqtoolkit-1.1.0a20250501.dist-info → aiqtoolkit-1.1.0a20250502.dist-info}/top_level.txt +0 -0
aiq/data_models/api_server.py
CHANGED
@@ -32,8 +32,57 @@ from aiq.data_models.interactive import HumanPrompt
 from aiq.utils.type_converter import GlobalTypeConverter


+class ChatContentType(str, Enum):
+    """
+    ChatContentType is an Enum that represents the type of Chat content.
+    """
+    TEXT = "text"
+    IMAGE_URL = "image_url"
+    INPUT_AUDIO = "input_audio"
+
+
+class InputAudio(BaseModel):
+    data: str = "default"
+    format: str = "default"
+
+
+class AudioContent(BaseModel):
+    model_config = ConfigDict(extra="forbid")
+
+    type: typing.Literal[ChatContentType.INPUT_AUDIO] = ChatContentType.INPUT_AUDIO
+    input_audio: InputAudio = InputAudio()
+
+
+class ImageUrl(BaseModel):
+    url: HttpUrl = HttpUrl(url="http://default.com")
+
+
+class ImageContent(BaseModel):
+    model_config = ConfigDict(extra="forbid")
+
+    type: typing.Literal[ChatContentType.IMAGE_URL] = ChatContentType.IMAGE_URL
+    image_url: ImageUrl = ImageUrl()
+
+
+class TextContent(BaseModel):
+    model_config = ConfigDict(extra="forbid")
+
+    type: typing.Literal[ChatContentType.TEXT] = ChatContentType.TEXT
+    text: str = "default"
+
+
+class Security(BaseModel):
+    model_config = ConfigDict(extra="forbid")
+
+    api_key: str = "default"
+    token: str = "default"
+
+
+UserContent = typing.Annotated[TextContent | ImageContent | AudioContent, Discriminator("type")]
+
+
 class Message(BaseModel):
-    content: str
+    content: str | list[UserContent]
     role: str

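Taken together, the added models form a discriminated union (UserContent, keyed on the type field) that Message.content can now carry alongside plain strings. A minimal sketch of building a multimodal user message with these models; the import path follows the file shown above, and the URLs and values are illustrative only:

from aiq.data_models.api_server import AudioContent
from aiq.data_models.api_server import ImageContent
from aiq.data_models.api_server import ImageUrl
from aiq.data_models.api_server import InputAudio
from aiq.data_models.api_server import Message
from aiq.data_models.api_server import TextContent

# One user message mixing text, an image reference and inline audio.
content = [
    TextContent(text="Describe the image and transcribe the audio."),
    ImageContent(image_url=ImageUrl(url="http://example.com/cat.png")),
    AudioContent(input_audio=InputAudio(data="<base64 audio bytes>", format="wav")),
]

# Message.content now accepts either a plain string or a list of UserContent items.
message = Message(content=content, role="user")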
@@ -65,6 +114,20 @@ class AIQChatRequest(BaseModel):
                               max_tokens=max_tokens,
                               top_p=top_p)

+    @staticmethod
+    def from_content(content: list[UserContent],
+                     *,
+                     model: str | None = None,
+                     temperature: float | None = None,
+                     max_tokens: int | None = None,
+                     top_p: float | None = None) -> "AIQChatRequest":
+
+        return AIQChatRequest(messages=[Message(content=content, role="user")],
+                              model=model,
+                              temperature=temperature,
+                              max_tokens=max_tokens,
+                              top_p=top_p)
+

 class AIQChoiceMessage(BaseModel):
     content: str | None = None
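The new from_content helper mirrors the existing static constructor above it but wraps structured content in a single user message. A usage sketch (the model name and sampling values are placeholders):

request = AIQChatRequest.from_content(
    [
        TextContent(text="What is in this picture?"),
        ImageContent(image_url=ImageUrl(url="http://example.com/photo.jpg")),
    ],
    model="example-model",
    temperature=0.2,
)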
@@ -231,15 +294,6 @@ class UserMessageContentRoleType(str, Enum):
     ASSISTANT = "assistant"


-class ChatContentType(str, Enum):
-    """
-    ChatContentType is an Enum that represents the type of Chat content.
-    """
-    TEXT = "text"
-    IMAGE_URL = "image_url"
-    INPUT_AUDIO = "input_audio"
-
-
 class WebSocketMessageType(str, Enum):
     """
     WebSocketMessageType is an Enum that represents WebSocket Message types.
@@ -270,46 +324,6 @@ class WebSocketMessageStatus(str, Enum):
     COMPLETE = "complete"


-class InputAudio(BaseModel):
-    data: str = "default"
-    format: str = "default"
-
-
-class AudioContent(BaseModel):
-    model_config = ConfigDict(extra="forbid")
-
-    type: typing.Literal[ChatContentType.INPUT_AUDIO] = ChatContentType.INPUT_AUDIO
-    input_audio: InputAudio = InputAudio()
-
-
-class ImageUrl(BaseModel):
-    url: HttpUrl = HttpUrl(url="http://default.com")
-
-
-class ImageContent(BaseModel):
-    model_config = ConfigDict(extra="forbid")
-
-    type: typing.Literal[ChatContentType.IMAGE_URL] = ChatContentType.IMAGE_URL
-    image_url: ImageUrl = ImageUrl()
-
-
-class TextContent(BaseModel):
-    model_config = ConfigDict(extra="forbid")
-
-    type: typing.Literal[ChatContentType.TEXT] = ChatContentType.TEXT
-    text: str = "default"
-
-
-class Security(BaseModel):
-    model_config = ConfigDict(extra="forbid")
-
-    api_key: str = "default"
-    token: str = "default"
-
-
-UserContent = typing.Annotated[TextContent | ImageContent | AudioContent, Discriminator("type")]
-
-
 class UserMessages(BaseModel):
     model_config = ConfigDict(extra="forbid")

@@ -487,7 +501,9 @@ GlobalTypeConverter.register_converter(_generate_response_to_chat_response)

 # ======== AIQChatRequest Converters ========
 def _aiq_chat_request_to_string(data: AIQChatRequest) -> str:
-
+    if isinstance(data.messages[-1].content, str):
+        return data.messages[-1].content
+    return str(data.messages[-1].content)


 GlobalTypeConverter.register_converter(_aiq_chat_request_to_string)
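With the isinstance guard, the string converter keeps its old behaviour for plain-string messages and falls back to str() for list-based content instead of returning a non-string value. A rough sketch of the effect, using the models and converter shown above (values illustrative):

req_text = AIQChatRequest(messages=[Message(content="hello", role="user")])
req_multi = AIQChatRequest.from_content([TextContent(text="hello")])

_aiq_chat_request_to_string(req_text)   # -> "hello"
_aiq_chat_request_to_string(req_multi)  # -> str(...) of the content list rather than raising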
aiq/observability/async_otel_listener.py
CHANGED
@@ -16,6 +16,7 @@
 import logging
 import re
 from contextlib import asynccontextmanager
+from contextlib import contextmanager
 from typing import Any

 from openinference.semconv.trace import OpenInferenceSpanKindValues
@@ -30,6 +31,17 @@ from aiq.builder.context import AIQContextState
 from aiq.data_models.intermediate_step import IntermediateStep
 from aiq.data_models.intermediate_step import IntermediateStepState

+try:
+    from weave.trace.context import weave_client_context
+    from weave.trace.context.call_context import get_current_call
+    from weave.trace.context.call_context import set_call_stack
+    from weave.trace.weave_client import Call
+    WEAVE_AVAILABLE = True
+except ImportError:
+    WEAVE_AVAILABLE = False
+    # we simply don't do anything if weave is not available
+    pass
+
 logger = logging.getLogger(__name__)

 OPENINFERENCE_SPAN_KIND = SpanAttributes.OPENINFERENCE_SPAN_KIND
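The guarded import only makes Weave an optional dependency; nothing is traced unless a Weave client has actually been initialized in the process before the listener is constructed. A minimal sketch of enabling it, assuming the weave package is installed (the project name is a placeholder):

import weave

# Must run before the AIQ workflow starts so that
# weave_client_context.require_weave_client() succeeds in the listener below.
weave.init("my-aiq-project")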
@@ -84,6 +96,17 @@ class AsyncOtelSpanListener:

         self._tracer = trace.get_tracer("aiq-async-otel-listener")

+        # Initialize Weave-specific components if available
+        self.gc = None
+        self._weave_calls = {}
+        if WEAVE_AVAILABLE:
+            try:
+                # Try to get the weave client, but don't fail if Weave isn't initialized
+                self.gc = weave_client_context.require_weave_client()
+            except Exception:
+                # Weave is not initialized, so we don't do anything
+                pass
+
     def _on_next(self, step: IntermediateStep) -> None:
         """
         The main logic that reacts to each IntermediateStep.
@@ -159,6 +182,12 @@ class AsyncOtelSpanListener:

             self._span_stack.clear()

+            # Clean up any lingering Weave calls if Weave is available and initialized
+            if self.gc is not None and self._weave_calls:
+                for _, call in list(self._weave_calls.items()):
+                    self.gc.finish_call(call, {"status": "incomplete"})
+                self._weave_calls.clear()
+
     def _serialize_payload(self, input_value: Any) -> tuple[str, bool]:
         """
         Serialize the input value to a string. Returns a tuple with the serialized value and a boolean indicating if the
@@ -237,6 +266,10 @@ class AsyncOtelSpanListener:

         self._outstanding_spans[step.UUID] = sub_span

+        # Create corresponding Weave call if Weave is available and initialized
+        if self.gc is not None:
+            self._create_weave_call(step, sub_span)
+
     def _process_end_event(self, step: IntermediateStep):

         # Find the subspan that was created in the start event
@@ -271,3 +304,114 @@ class AsyncOtelSpanListener:

         # End the subspan
         sub_span.end(end_time=end_ns)
+
+        # Finish corresponding Weave call if Weave is available and initialized
+        if self.gc is not None:
+            self._finish_weave_call(step, sub_span)
+
+    @contextmanager
+    def parent_call(self, trace_id: str, parent_call_id: str):
+        """Context manager to set a parent call context for Weave.
+        This allows connecting AIQ spans to existing traces from other frameworks.
+        """
+        dummy_call = Call(trace_id=trace_id, id=parent_call_id, _op_name="", project_id="", parent_id=None, inputs={})
+        with set_call_stack([dummy_call]):
+            yield
+
+    def _create_weave_call(self, step: IntermediateStep, span: Span) -> None:
+        """
+        Create a Weave call directly from the span and step data,
+        connecting to existing framework traces if available.
+        """
+        # Check for existing Weave trace/call
+        existing_call = get_current_call()
+
+        # Extract parent call if applicable
+        parent_call = None
+
+        # If we have an existing Weave call from another framework (e.g., LangChain),
+        # use it as the parent
+        if existing_call is not None:
+            parent_call = existing_call
+            logger.debug(f"Found existing Weave call: {existing_call.id} from trace: {existing_call.trace_id}")
+        # Otherwise, check our internal stack for parent relationships
+        elif len(self._weave_calls) > 0 and len(self._span_stack) > 1:
+            # Get the parent span using stack position (one level up)
+            parent_span_id = self._span_stack[-2].get_span_context().span_id
+            # Find the corresponding weave call for this parent span
+            for uuid, call in self._weave_calls.items():
+                if getattr(call, "span_id", None) == parent_span_id:
+                    parent_call = call
+                    break
+
+        # Generate a meaningful operation name based on event type
+        event_type = step.payload.event_type.split(".")[-1]
+        if step.payload.name:
+            op_name = f"aiq.{event_type}.{step.payload.name}"
+        else:
+            op_name = f"aiq.{event_type}"
+
+        # Create input dictionary
+        inputs = {}
+        if step.payload.data and step.payload.data.input is not None:
+            try:
+                # Add the input to the Weave call
+                inputs["input"] = step.payload.data.input
+            except Exception:
+                # If serialization fails, use string representation
+                inputs["input"] = str(step.payload.data.input)
+
+        # Create the Weave call
+        call = self.gc.create_call(
+            op_name,
+            inputs=inputs,
+            parent=parent_call,
+            attributes=span.attributes,
+            display_name=op_name,
+        )
+
+        # Store the call with step UUID as key
+        self._weave_calls[step.UUID] = call
+
+        # Store span ID for parent reference
+        setattr(call, "span_id", span.get_span_context().span_id)
+
+        return call
+
+    def _finish_weave_call(self, step: IntermediateStep, span: Span) -> None:
+        """
+        Finish a previously created Weave call
+        """
+        # Find the call for this step
+        call = self._weave_calls.pop(step.UUID, None)
+
+        if call is None:
+            logger.warning("No Weave call found for step %s", step.UUID)
+            return
+
+        # Create output dictionary
+        outputs = {}
+        if step.payload.data and step.payload.data.output is not None:
+            try:
+                # Add the output to the Weave call
+                outputs["output"] = step.payload.data.output
+            except Exception:
+                # If serialization fails, use string representation
+                outputs["output"] = str(step.payload.data.output)
+
+        # Add usage information if available
+        usage_info = step.payload.usage_info
+        if usage_info:
+            if usage_info.token_usage:
+                outputs["prompt_tokens"] = usage_info.token_usage.prompt_tokens
+                outputs["completion_tokens"] = usage_info.token_usage.completion_tokens
+                outputs["total_tokens"] = usage_info.token_usage.total_tokens
+
+            if usage_info.num_llm_calls:
+                outputs["num_llm_calls"] = usage_info.num_llm_calls
+
+            if usage_info.seconds_between_calls:
+                outputs["seconds_between_calls"] = usage_info.seconds_between_calls
+
+        # Finish the call with outputs
+        self.gc.finish_call(call, outputs)
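An illustrative use of the new parent_call context manager to nest the listener's Weave calls under a trace created elsewhere; listener is assumed to be an already-constructed AsyncOtelSpanListener instance and the IDs are placeholders:

# listener: an existing AsyncOtelSpanListener instance (assumed)
with listener.parent_call(trace_id="0123456789abcdef", parent_call_id="call-42"):
    # Weave calls the listener creates inside this block are parented under
    # the dummy call, linking them to the external framework's trace.
    ...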
{aiqtoolkit-1.1.0a20250501.dist-info → aiqtoolkit-1.1.0a20250502.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aiqtoolkit
-Version: 1.1.0a20250501
+Version: 1.1.0a20250502
 Summary: Agent Intelligence Toolkit (AIQ Toolkit)
 Author: NVIDIA Corporation
 Maintainer: NVIDIA Corporation
@@ -250,6 +250,8 @@ Provides-Extra: zep-cloud
 Requires-Dist: aiqtoolkit-zep-cloud; extra == "zep-cloud"
 Provides-Extra: agno
 Requires-Dist: aiqtoolkit-agno; extra == "agno"
+Provides-Extra: weave
+Requires-Dist: aiqtoolkit-weave; extra == "weave"
 Provides-Extra: examples
 Requires-Dist: aiq_email_phishing_analyzer; extra == "examples"
 Requires-Dist: aiq_multi_frameworks; extra == "examples"
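The new extra maps to the separate aiqtoolkit-weave plugin distribution, so the optional Weave integration would presumably be pulled in with pip install "aiqtoolkit[weave]".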
{aiqtoolkit-1.1.0a20250501.dist-info → aiqtoolkit-1.1.0a20250502.dist-info}/RECORD
RENAMED
@@ -73,7 +73,7 @@ aiq/cli/commands/workflow/templates/pyproject.toml.j2,sha256=tDV7-vbt8Of82OEdSOi
 aiq/cli/commands/workflow/templates/register.py.j2,sha256=SlOFmIZakPDu_E6DbIhUZ3yP8KhTrAQCFGBuhy9Fyg4,170
 aiq/cli/commands/workflow/templates/workflow.py.j2,sha256=NRp0MP8GtZByk7lrHp2Y5_6iEopRK2Wyrt0v0_2qQeo,1226
 aiq/data_models/__init__.py,sha256=Xs1JQ16L9btwreh4pdGKwskffAw1YFO48jKrU4ib_7c,685
-aiq/data_models/api_server.py,sha256=
+aiq/data_models/api_server.py,sha256=7BY7meyACBhrchO2-I1-X2_DhNoezdjJ5bGihK7eUyI,17533
 aiq/data_models/common.py,sha256=G63rUXvDAtK6p1SrRyH0VlHGqrDgCZVVjbnzgGSl2Ic,4213
 aiq/data_models/component.py,sha256=x6jm1Fhn1k1hGu-5AjM0ywuyvs6ztaZfapD8bLUXSqc,1469
 aiq/data_models/component_ref.py,sha256=GyyIf4k80aUIn6LV9r84m5imbiVhpdaY7uKMMpYpbzU,3872
@@ -172,7 +172,7 @@ aiq/memory/models.py,sha256=4TZW2VSroLx0Ea11F_33_Rmx1diAk1jFpz-45jyPpnc,4026
 aiq/meta/module_to_distro.json,sha256=1XV7edobFrdDKvsSoynfodXg_hczUWpDrQzGkW9qqEs,28
 aiq/meta/pypi.md,sha256=LZTol6Cn-tU0cp9DynN6xCKRPONA9PVKWk3isU15SLc,4296
 aiq/observability/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-aiq/observability/async_otel_listener.py,sha256=
+aiq/observability/async_otel_listener.py,sha256=nDhJWDrsBygt03c0vsLWlB6f3ZzOh-3VjGgnJyw9vEc,17015
 aiq/observability/register.py,sha256=eCi76I9fDPWIckKDTgUN1zeinI3mPrHvhU3XkROT6G8,3921
 aiq/plugins/.namespace,sha256=Gace0pOC3ETEJf-TBVuNw0TQV6J_KtOPpEiSzMH-odo,215
 aiq/profiler/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -303,10 +303,10 @@ aiq/utils/reactive/base/observer_base.py,sha256=UAlyAY_ky4q2t0P81RVFo2Bs_R7z5Nde
 aiq/utils/reactive/base/subject_base.py,sha256=Ed-AC6P7cT3qkW1EXjzbd5M9WpVoeN_9KCe3OM3FLU4,2521
 aiq/utils/settings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aiq/utils/settings/global_settings.py,sha256=U9TCLdoZsKq5qOVGjREipGVv9e-FlStzqy5zv82_VYk,7454
-aiqtoolkit-1.1.
-aiqtoolkit-1.1.
-aiqtoolkit-1.1.
-aiqtoolkit-1.1.
-aiqtoolkit-1.1.
-aiqtoolkit-1.1.
-aiqtoolkit-1.1.
+aiqtoolkit-1.1.0a20250502.dist-info/licenses/LICENSE-3rd-party.txt,sha256=8o7aySJa9CBvFshPcsRdJbczzdNyDGJ8b0J67WRUQ2k,183936
+aiqtoolkit-1.1.0a20250502.dist-info/licenses/LICENSE.md,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+aiqtoolkit-1.1.0a20250502.dist-info/METADATA,sha256=Gjr7NeisyZd3siwKG7hoQHUDT_Z4hGDfjGqVqqQskYs,19836
+aiqtoolkit-1.1.0a20250502.dist-info/WHEEL,sha256=wXxTzcEDnjrTwFYjLPcsW_7_XihufBwmpiBeiXNBGEA,91
+aiqtoolkit-1.1.0a20250502.dist-info/entry_points.txt,sha256=gRlPfR5g21t328WNEQ4CcEz80S1sJNS8A7rMDYnzl4A,452
+aiqtoolkit-1.1.0a20250502.dist-info/top_level.txt,sha256=fo7AzYcNhZ_tRWrhGumtxwnxMew4xrT1iwouDy_f0Kc,4
+aiqtoolkit-1.1.0a20250502.dist-info/RECORD,,
{aiqtoolkit-1.1.0a20250501.dist-info → aiqtoolkit-1.1.0a20250502.dist-info}/WHEEL
RENAMED
File without changes
{aiqtoolkit-1.1.0a20250501.dist-info → aiqtoolkit-1.1.0a20250502.dist-info}/entry_points.txt
RENAMED
File without changes
{aiqtoolkit-1.1.0a20250501.dist-info → aiqtoolkit-1.1.0a20250502.dist-info}/licenses/LICENSE-3rd-party.txt
RENAMED
File without changes
{aiqtoolkit-1.1.0a20250501.dist-info → aiqtoolkit-1.1.0a20250502.dist-info}/licenses/LICENSE.md
RENAMED
File without changes
{aiqtoolkit-1.1.0a20250501.dist-info → aiqtoolkit-1.1.0a20250502.dist-info}/top_level.txt
RENAMED
File without changes