ai-pipeline-core 0.2.6__py3-none-any.whl → 0.2.8__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only and reflects the changes between the two versions.
- ai_pipeline_core/__init__.py +1 -1
- ai_pipeline_core/flow/options.py +2 -2
- ai_pipeline_core/llm/ai_messages.py +5 -2
- ai_pipeline_core/llm/client.py +47 -19
- ai_pipeline_core/llm/model_options.py +3 -3
- ai_pipeline_core/llm/model_response.py +1 -1
- ai_pipeline_core/llm/model_types.py +1 -1
- ai_pipeline_core/utils/deploy.py +20 -35
- {ai_pipeline_core-0.2.6.dist-info → ai_pipeline_core-0.2.8.dist-info}/METADATA +1 -1
- {ai_pipeline_core-0.2.6.dist-info → ai_pipeline_core-0.2.8.dist-info}/RECORD +12 -12
- {ai_pipeline_core-0.2.6.dist-info → ai_pipeline_core-0.2.8.dist-info}/WHEEL +0 -0
- {ai_pipeline_core-0.2.6.dist-info → ai_pipeline_core-0.2.8.dist-info}/licenses/LICENSE +0 -0
ai_pipeline_core/__init__.py
CHANGED
ai_pipeline_core/flow/options.py
CHANGED
@@ -61,11 +61,11 @@ class FlowOptions(BaseSettings):
     """

     core_model: ModelName = Field(
-        default="
+        default="gemini-2.5-pro",
         description="Primary model for complex analysis and generation tasks.",
     )
     small_model: ModelName = Field(
-        default="
+        default="grok-4-fast",
         description="Fast, cost-effective model for simple tasks and orchestration.",
     )

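Both defaults now point at concrete model names. A minimal usage sketch (not from the package source), assuming FlowOptions behaves like a standard pydantic-settings class whose values can also come from environment variables or an .env file; the override value below is hypothetical:

from ai_pipeline_core.flow.options import FlowOptions

options = FlowOptions()
print(options.core_model)   # "gemini-2.5-pro" (new default in 0.2.8)
print(options.small_model)  # "grok-4-fast" (new default in 0.2.8)

# Hypothetical override; any value must still satisfy the ModelName type.
custom = FlowOptions(small_model="gemini-2.5-flash")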
ai_pipeline_core/llm/ai_messages.py
CHANGED

@@ -260,11 +260,14 @@ class AIMessages(list[AIMessageType]):

         for message in self:
             if isinstance(message, str):
-                messages.append({"role": "user", "content": message})
+                messages.append({"role": "user", "content": [{"type": "text", "text": message}]})
             elif isinstance(message, Document):
                 messages.append({"role": "user", "content": AIMessages.document_to_prompt(message)})
             elif isinstance(message, ModelResponse):  # type: ignore
-                messages.append({
+                messages.append({
+                    "role": "assistant",
+                    "content": [{"type": "text", "text": message.content}],
+                })
             else:
                 raise ValueError(f"Unsupported message type: {type(message)}")

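The practical effect: plain strings and prior model responses are now serialized as multipart content rather than bare strings. A before/after sketch of the payload shape, inferred from this hunk (the example text is made up):

# Before (0.2.6): a plain string became a bare string content field.
{"role": "user", "content": "Summarize the attached report"}

# After (0.2.8): the same string is wrapped in a single text part, the shape that
# per-part cache_control (see client.py below) can attach to.
{"role": "user", "content": [{"type": "text", "text": "Summarize the attached report"}]}

# Assistant turns built from a ModelResponse follow the same multipart shape.
{"role": "assistant", "content": [{"type": "text", "text": "<previous response text>"}]}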
ai_pipeline_core/llm/client.py
CHANGED
@@ -39,7 +39,7 @@ def _process_messages(
     context: AIMessages,
     messages: AIMessages,
     system_prompt: str | None = None,
-    cache_ttl: str | None = "
+    cache_ttl: str | None = "300s",
 ) -> list[ChatCompletionMessageParam]:
     """Process and format messages for LLM API consumption.

@@ -51,7 +51,7 @@ def _process_messages(
         context: Messages to be cached (typically expensive/static content).
         messages: Regular messages without caching (dynamic queries).
         system_prompt: Optional system instructions for the model.
-        cache_ttl: Cache TTL for context messages (e.g. "120s", "
+        cache_ttl: Cache TTL for context messages (e.g. "120s", "300s", "1h").
             Set to None or empty string to disable caching.

     Returns:

@@ -86,12 +86,17 @@ def _process_messages(
     # Use AIMessages.to_prompt() for context
     context_messages = context.to_prompt()

-    # Apply caching to last context message if cache_ttl is set
+    # Apply caching to last context message and last content part if cache_ttl is set
     if cache_ttl:
         context_messages[-1]["cache_control"] = {  # type: ignore
             "type": "ephemeral",
             "ttl": cache_ttl,
         }
+        assert isinstance(context_messages[-1]["content"], list)  # type: ignore
+        context_messages[-1]["content"][-1]["cache_control"] = {  # type: ignore
+            "type": "ephemeral",
+            "ttl": cache_ttl,
+        }

     processed_messages.extend(context_messages)

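With this hunk the cache directive is applied twice: once on the last context message and once on its last content part. An illustrative shape of the resulting message (values are examples, not taken from the library):

{
    "role": "user",
    "content": [
        {
            "type": "text",
            "text": "<large, relatively static context>",
            "cache_control": {"type": "ephemeral", "ttl": "300s"},
        },
    ],
    "cache_control": {"type": "ephemeral", "ttl": "300s"},
}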
@@ -103,6 +108,38 @@ def _process_messages(
     return processed_messages


+def _remove_cache_control(
+    messages: list[ChatCompletionMessageParam],
+) -> list[ChatCompletionMessageParam]:
+    """Remove cache control directives from messages.
+
+    Internal utility that strips cache_control fields from both message-level
+    and content-level entries. Used in retry logic when cache-related errors
+    occur during LLM API calls.
+
+    Args:
+        messages: List of messages that may contain cache_control directives.
+
+    Returns:
+        The same message list (modified in-place) with all cache_control
+        fields removed from both messages and their content items.
+
+    Note:
+        This function modifies the input list in-place but also returns it
+        for convenience. Handles both list-based content (multipart) and
+        string content (simple messages).
+    """
+    for message in messages:
+        if content := message.get("content"):
+            if isinstance(content, list):
+                for item in content:
+                    if "cache_control" in item:
+                        del item["cache_control"]
+        if "cache_control" in message:
+            del message["cache_control"]
+    return messages  # type: ignore
+
+
 def _model_name_to_openrouter_model(model: ModelName) -> str:
     """Convert a model name to an OpenRouter model name.

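A brief usage sketch (assumed, based on the retry hunk further below): when a provider rejects cached content, the directives are stripped in place and the same list is resent.

cleaned = _remove_cache_control(processed_messages)
# The list is modified in place; no message or content part keeps cache_control.
assert all("cache_control" not in m for m in cleaned)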
@@ -237,6 +274,10 @@ async def _generate_with_retry(
     if not context and not messages:
         raise ValueError("Either context or messages must be provided")

+    if "gemini" in model.lower() and context.approximate_tokens_count < 10000:
+        # Bug fix for minimum explicit context size for Gemini models
+        options.cache_ttl = None
+
     processed_messages = _process_messages(
         context, messages, options.system_prompt, options.cache_ttl
     )

@@ -263,6 +304,8 @@ async def _generate_with_retry(
             if not isinstance(e, asyncio.TimeoutError):
                 # disable cache if it's not a timeout because it may cause an error
                 completion_kwargs["extra_body"]["cache"] = {"no-cache": True}
+                # sometimes there are issues with cache so cache is removed in case of failure
+                processed_messages = _remove_cache_control(processed_messages)

             logger.warning(
                 f"LLM generation failed (attempt {attempt + 1}/{options.retries}): {e}",
@@ -374,26 +417,11 @@ async def generate(
         ...     ])
         >>> response = await llm.generate("gpt-5", messages=messages)

-    Configuration via LiteLLM Proxy:
-        >>> # Configure temperature in litellm_config.yaml:
-        >>> # model_list:
-        >>> #   - model_name: gpt-5
-        >>> #     litellm_params:
-        >>> #       model: openai/gpt-4o
-        >>> #       temperature: 0.3
-        >>> #       max_tokens: 1000
-        >>>
-        >>> # Configure retry logic in proxy:
-        >>> # general_settings:
-        >>> #   master_key: sk-1234
-        >>> #   max_retries: 5
-        >>> #   retry_delay: 15
-
     Performance:
         - Context caching saves ~50-90% tokens on repeated calls
         - First call: full token cost
         - Subsequent calls (within cache TTL): only messages tokens
-        - Default cache TTL is
+        - Default cache TTL is 300s/5 minutes (production-optimized)
         - Default retry logic: 3 attempts with 10s delay (production-optimized)

     Caching:
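A rough illustration of the savings range quoted above; all numbers are made up for the example:

context_tokens, message_tokens, calls = 50_000, 500, 5

without_cache = calls * (context_tokens + message_tokens)                      # 252_500
with_cache = (context_tokens + message_tokens) + (calls - 1) * message_tokens  # 52_500
savings = 1 - with_cache / without_cache                                       # ~0.79, inside the ~50-90% range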
ai_pipeline_core/llm/model_options.py
CHANGED

@@ -45,7 +45,7 @@ class ModelOptions(BaseModel):

        timeout: Maximum seconds to wait for response (default: 300).

-        cache_ttl: Cache TTL for context messages (default: "
+        cache_ttl: Cache TTL for context messages (default: "300s").
            String format like "60s", "5m", or None to disable caching.
            Applied to the last context message for efficient token reuse.

@@ -165,7 +165,7 @@ class ModelOptions(BaseModel):
        - search_context_size only works with search models
        - reasoning_effort only works with models that support explicit reasoning
        - response_format is set internally by generate_structured()
-        - cache_ttl accepts formats like "120s", "5m"
+        - cache_ttl accepts formats like "120s", "5m", "1h" or None (default: "300s")
        - stop sequences are limited to 4 by most providers
        - user identifier helps track costs per end-user (max 256 chars)
        - extra_body allows passing provider-specific parameters

@@ -179,7 +179,7 @@ class ModelOptions(BaseModel):
    retries: int = 3
    retry_delay_seconds: int = 20
    timeout: int = 600
-    cache_ttl: str | None = "
+    cache_ttl: str | None = "300s"
    service_tier: Literal["auto", "default", "flex", "scale", "priority"] | None = None
    max_completion_tokens: int | None = None
    stop: str | list[str] | None = None
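A minimal sketch of overriding the new default, using only fields visible in this diff (ModelOptions is a pydantic BaseModel, so keyword construction is assumed to work as usual):

from ai_pipeline_core.llm.model_options import ModelOptions

opts = ModelOptions()                    # cache_ttl defaults to "300s" in 0.2.8
longer = ModelOptions(cache_ttl="1h")    # formats like "120s", "5m", "1h" per the docstring
no_cache = ModelOptions(cache_ttl=None)  # disable context caching entirely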
ai_pipeline_core/llm/model_response.py
CHANGED

@@ -266,7 +266,7 @@ class ModelResponse(ChatCompletion):

        other_fields = self.__dict__
        for key, value in other_fields.items():
-            if key in ["_model_options", "_metadata", "choices"
+            if key in ["_model_options", "_metadata", "choices"]:
                continue
            try:
                metadata[f"response.raw.{key}"] = json.dumps(value, indent=2, default=str)
ai_pipeline_core/utils/deploy.py
CHANGED
@@ -7,8 +7,8 @@ This script:
 3. Creates/updates a Prefect deployment using the RunnerDeployment pattern

 Requirements:
-    -
-    -
+    - Settings configured with PREFECT_API_URL and optionally PREFECT_API_KEY
+    - Settings configured with PREFECT_GCS_BUCKET
     - pyproject.toml with project name and version
     - Local package installed for flow metadata extraction

@@ -18,7 +18,6 @@ Usage:

 import argparse
 import asyncio
-import os
 import subprocess
 import sys
 import tomllib

@@ -34,14 +33,6 @@ from prefect.flows import load_flow_from_entrypoint
 from ai_pipeline_core.settings import settings
 from ai_pipeline_core.storage import Storage

-# ============================================================================
-# Configuration
-# ============================================================================
-
-WORK_POOL_NAME = settings.prefect_work_pool_name
-DEFAULT_WORK_QUEUE = settings.prefect_work_queue_name
-PREDEFINED_BUCKET = settings.prefect_gcs_bucket
-
 # ============================================================================
 # Deployer Class
 # ============================================================================

@@ -57,7 +48,7 @@ class Deployer:
     def __init__(self):
         """Initialize deployer."""
         self.config = self._load_config()
-        self.
+        self._validate_prefect_settings()

     def _load_config(self) -> dict[str, Any]:
         """Load and normalize project configuration from pyproject.toml.

@@ -65,10 +56,10 @@ class Deployer:
         Returns:
             Configuration dictionary with project metadata and deployment settings.
         """
-        if not
+        if not settings.prefect_gcs_bucket:
             self._die(
-                "PREFECT_GCS_BUCKET not
-                "
+                "PREFECT_GCS_BUCKET not configured in settings.\n"
+                "Configure via environment variable or .env file:\n"
                 " PREFECT_GCS_BUCKET=your-bucket-name"
             )

@@ -97,33 +88,23 @@ class Deployer:
             "name": name,
             "package": package_name,
             "version": version,
-            "bucket":
+            "bucket": settings.prefect_gcs_bucket,
             "folder": f"flows/{flow_folder}",
             "tarball": f"{package_name}-{version}.tar.gz",
-            "work_pool":
-            "work_queue":
+            "work_pool": settings.prefect_work_pool_name,
+            "work_queue": settings.prefect_work_queue_name,
         }

-    def
-        """
-        self.api_url =
+    def _validate_prefect_settings(self):
+        """Validate that required Prefect settings are configured."""
+        self.api_url = settings.prefect_api_url
         if not self.api_url:
             self._die(
-                "PREFECT_API_URL not
-                "
+                "PREFECT_API_URL not configured in settings.\n"
+                "Configure via environment variable or .env file:\n"
                 " PREFECT_API_URL=https://api.prefect.cloud/api/accounts/.../workspaces/..."
             )

-        os.environ["PREFECT_API_URL"] = self.api_url
-
-        # Optional: API key for authentication
-        if api_key := os.getenv("PREFECT_API_KEY"):
-            os.environ["PREFECT_API_KEY"] = api_key
-
-        # Optional: Alternative auth method
-        if api_auth := os.getenv("PREFECT_API_AUTH_STRING"):
-            os.environ["PREFECT_API_AUTH_STRING"] = api_auth
-
     def _run(self, cmd: str, check: bool = True) -> Optional[str]:
         """Execute shell command and return output.

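The deployer now reads configuration from the shared settings object instead of module-level constants and os.environ. A sketch of that dependency, using attribute names that appear in this diff (the assertions are illustrative, not the library's code):

from ai_pipeline_core.settings import settings

# Both must be provided via environment variables or an .env file.
assert settings.prefect_api_url, "PREFECT_API_URL is required"
assert settings.prefect_gcs_bucket, "PREFECT_GCS_BUCKET is required"

bucket = settings.prefect_gcs_bucket           # replaces PREDEFINED_BUCKET
work_pool = settings.prefect_work_pool_name    # replaces WORK_POOL_NAME
work_queue = settings.prefect_work_queue_name  # replaces DEFAULT_WORK_QUEUE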
@@ -345,12 +326,16 @@ Example:
     python -m ai_pipeline_core.utils.deploy

 Prerequisites:
-    -
-    -
+    - Settings configured with PREFECT_API_URL (and optionally PREFECT_API_KEY)
+    - Settings configured with PREFECT_GCS_BUCKET
     - pyproject.toml with project name and version
     - Package installed locally: pip install -e .
     - GCP authentication configured (via service account or default credentials)
     - Work pool created in Prefect UI or CLI
+
+Settings can be configured via:
+    - Environment variables (e.g., export PREFECT_API_URL=...)
+    - .env file in the current directory
 """,
 )

{ai_pipeline_core-0.2.6.dist-info → ai_pipeline_core-0.2.8.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ai-pipeline-core
-Version: 0.2.6
+Version: 0.2.8
 Summary: Core utilities for AI-powered processing pipelines using prefect
 Project-URL: Homepage, https://github.com/bbarwik/ai-pipeline-core
 Project-URL: Repository, https://github.com/bbarwik/ai-pipeline-core
{ai_pipeline_core-0.2.6.dist-info → ai_pipeline_core-0.2.8.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-ai_pipeline_core/__init__.py,sha256=
+ai_pipeline_core/__init__.py,sha256=NMpeyF-rJdNnNWfi9eoOgWNnk7kOVHqvSZq3cnyDuCI,5720
 ai_pipeline_core/exceptions.py,sha256=vx-XLTw2fJSPs-vwtXVYtqoQUcOc0JeI7UmHqRqQYWU,1569
 ai_pipeline_core/pipeline.py,sha256=fWTVmrnOEIFge6o2NUYW2ndGef5UurpL8_fK5tkXbzI,28700
 ai_pipeline_core/prefect.py,sha256=91ZgLJHsDsRUW77CpNmkKxYs3RCJuucPM3pjKmNBeDg,2199

@@ -16,13 +16,13 @@ ai_pipeline_core/documents/temporary_document.py,sha256=Sam344Mm5AlZTm3_l01YdDWe
 ai_pipeline_core/documents/utils.py,sha256=ZyJNjFN7ihWno0K7dJZed7twYmmPLA0z40UzFw1A3A8,5465
 ai_pipeline_core/flow/__init__.py,sha256=2BfWYMOPYW5teGzwo-qzpn_bom1lxxry0bPsjVgcsCk,188
 ai_pipeline_core/flow/config.py,sha256=3PCDph2n8dj-txqAvd9Wflbi_6lmfXFR9rUhM-szGSQ,18887
-ai_pipeline_core/flow/options.py,sha256=
+ai_pipeline_core/flow/options.py,sha256=zn3N5DgYtlxLq0AvXfana3UOhym7A3XCheQSBIIarZE,2295
 ai_pipeline_core/llm/__init__.py,sha256=3B_vtEzxrzidP1qOUNQ4RxlUmxZ2MBKQcUhQiTybM9g,661
-ai_pipeline_core/llm/ai_messages.py,sha256=
-ai_pipeline_core/llm/client.py,sha256=
-ai_pipeline_core/llm/model_options.py,sha256=
-ai_pipeline_core/llm/model_response.py,sha256=
-ai_pipeline_core/llm/model_types.py,sha256=
+ai_pipeline_core/llm/ai_messages.py,sha256=Onin3UPdbJQNl3WfY3-_jE5KRmF-ciXsa5K6UPOiy5s,14410
+ai_pipeline_core/llm/client.py,sha256=Da1NgGzfIkFRw_aDASK36MMdKe3DXaj5_3wMg0gR-Hk,24999
+ai_pipeline_core/llm/model_options.py,sha256=uRNIHfVeh2sgt1mZBiOUx6hPQ6GKjB8b7TytZJ6afKg,11768
+ai_pipeline_core/llm/model_response.py,sha256=6kEr9ss3UGlykvtWAvh1l55rGw2-wyVup3QJhm0Oggc,13264
+ai_pipeline_core/llm/model_types.py,sha256=2J4Qsb1x21I4eo_VPeaMMOW8shOGPqzJuoGjTLcBFPM,2791
 ai_pipeline_core/logging/__init__.py,sha256=Nz6-ghAoENsgNmLD2ma9TW9M0U2_QfxuQ5DDW6Vt6M0,651
 ai_pipeline_core/logging/logging.yml,sha256=YTW48keO_K5bkkb-KXGM7ZuaYKiquLsjsURei8Ql0V4,1353
 ai_pipeline_core/logging/logging_config.py,sha256=pV2x6GgMPXrzPH27sicCSXfw56beio4C2JKCJ3NsXrg,6207

@@ -33,9 +33,9 @@ ai_pipeline_core/simple_runner/simple_runner.py,sha256=f6cIodYkul-Apu1d63T6kR5DZ
 ai_pipeline_core/storage/__init__.py,sha256=tcIkjJ3zPBLCyetwiJDewBvS2sbRJrDlBh3gEsQm08E,184
 ai_pipeline_core/storage/storage.py,sha256=ClMr419Y-eU2RuOjZYd51dC0stWQk28Vb56PvQaoUwc,20007
 ai_pipeline_core/utils/__init__.py,sha256=TJSmEm1Quf-gKwXrxM96u2IGzVolUyeNNfLMPoLstXI,254
-ai_pipeline_core/utils/deploy.py,sha256=
+ai_pipeline_core/utils/deploy.py,sha256=rAtRuwkmGkc-fqvDMXpt08OzLrD7KTDMAmLDC9wYg7Y,13147
 ai_pipeline_core/utils/remote_deployment.py,sha256=cPTgnS5InK08qiWnuPz3e8YKjoT3sPBloSaDfNTzghs,10137
-ai_pipeline_core-0.2.
-ai_pipeline_core-0.2.
-ai_pipeline_core-0.2.
-ai_pipeline_core-0.2.
+ai_pipeline_core-0.2.8.dist-info/METADATA,sha256=Sl_8s24ar0lwa7iio4d0QYDhzsmAIGHHqOKuXnFxP7s,15159
+ai_pipeline_core-0.2.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ai_pipeline_core-0.2.8.dist-info/licenses/LICENSE,sha256=kKj8mfbdWwkyG3U6n7ztB3bAZlEwShTkAsvaY657i3I,1074
+ai_pipeline_core-0.2.8.dist-info/RECORD,,

{ai_pipeline_core-0.2.6.dist-info → ai_pipeline_core-0.2.8.dist-info}/WHEEL
File without changes

{ai_pipeline_core-0.2.6.dist-info → ai_pipeline_core-0.2.8.dist-info}/licenses/LICENSE
File without changes