ai-pipeline-core 0.2.1__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ai_pipeline_core/__init__.py

@@ -118,7 +118,7 @@ from .prompt_manager import PromptManager
 from .settings import Settings
 from .tracing import TraceInfo, TraceLevel, set_trace_cost, trace
 
-__version__ = "0.2.1"
+__version__ = "0.2.3"
 
 __all__ = [
     # Config/Settings
ai_pipeline_core/documents/document.py

@@ -302,7 +302,7 @@ class Document(BaseModel, ABC):
         name: str,
         content: bytes,
         description: str | None = None,
-        sources: list[str] = [],
+        sources: list[str] | None = None,
     ) -> Self: ...
 
     @overload
@@ -313,7 +313,7 @@ class Document(BaseModel, ABC):
         name: str,
         content: str,
         description: str | None = None,
-        sources: list[str] = [],
+        sources: list[str] | None = None,
     ) -> Self: ...
 
     @overload
@@ -324,7 +324,7 @@ class Document(BaseModel, ABC):
         name: str,
         content: dict[str, Any],
         description: str | None = None,
-        sources: list[str] = [],
+        sources: list[str] | None = None,
     ) -> Self: ...
 
     @overload
@@ -335,7 +335,7 @@ class Document(BaseModel, ABC):
         name: str,
         content: list[Any],
         description: str | None = None,
-        sources: list[str] = [],
+        sources: list[str] | None = None,
     ) -> Self: ...
 
     @overload
@@ -346,7 +346,7 @@ class Document(BaseModel, ABC):
         name: str,
         content: BaseModel,
         description: str | None = None,
-        sources: list[str] = [],
+        sources: list[str] | None = None,
     ) -> Self: ...
 
     @classmethod
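
The repeated signature change above (and in create() below) removes the classic Python mutable-default pitfall: a default like sources: list[str] = [] is evaluated once at definition time, so every call that omits the argument shares the same list. A minimal standalone sketch of the failure mode and the None-sentinel fix (hypothetical tag_* functions, not from this package):

    def tag_broken(name: str, seen: list[str] = []) -> list[str]:
        # One list is created at "def" time and reused by every call.
        seen.append(name)
        return seen

    def tag_fixed(name: str, seen: list[str] | None = None) -> list[str]:
        # None sentinel: a fresh list is created for each call.
        if seen is None:
            seen = []
        seen.append(name)
        return seen

    print(tag_broken("a"))  # ['a']
    print(tag_broken("b"))  # ['a', 'b']  <- state leaked from the first call
    print(tag_fixed("a"))   # ['a']
    print(tag_fixed("b"))   # ['b']
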
@@ -356,7 +356,7 @@ class Document(BaseModel, ABC):
         name: str,
         content: str | bytes | dict[str, Any] | list[Any] | BaseModel,
         description: str | None = None,
-        sources: list[str] = [],
+        sources: list[str] | None = None,
     ) -> Self:
         r"""Create a Document with automatic content type conversion (recommended).
 
@@ -469,7 +469,7 @@ class Document(BaseModel, ABC):
         name: str,
         content: bytes,
         description: str | None = None,
-        sources: list[str] = [],
+        sources: list[str] | None = None,
     ) -> None:
         """Initialize a Document instance with raw bytes content.
 
@@ -509,7 +509,11 @@ class Document(BaseModel, ABC):
         if type(self) is Document:
             raise TypeError("Cannot instantiate abstract Document class directly")
 
-        super().__init__(name=name, content=content, description=description, sources=sources)
+        # Only pass sources if not None to let Pydantic's default_factory handle it
+        if sources is not None:
+            super().__init__(name=name, content=content, description=description, sources=sources)
+        else:
+            super().__init__(name=name, content=content, description=description)
 
     name: str
     description: str | None = None
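
Forwarding sources only when it is not None lets Pydantic's default_factory construct a fresh list per instance rather than always receiving an explicit value. A minimal sketch of the same pattern, assuming a simplified model with Field(default_factory=list) (the names here are illustrative, not the package's actual fields):

    from pydantic import BaseModel, Field

    class Doc(BaseModel):
        name: str
        sources: list[str] = Field(default_factory=list)

    class MyDoc(Doc):
        def __init__(self, name: str, sources: list[str] | None = None) -> None:
            # Omit the kwarg entirely when the caller passed nothing,
            # so default_factory produces a new list for this instance.
            if sources is not None:
                super().__init__(name=name, sources=sources)
            else:
                super().__init__(name=name)

    a, b = MyDoc(name="a"), MyDoc(name="b")
    assert a.sources is not b.sources  # independent lists per instance
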
ai_pipeline_core/documents/flow_document.py

@@ -46,7 +46,7 @@ class FlowDocument(Document):
         name: str,
         content: bytes,
         description: str | None = None,
-        sources: list[str] = [],
+        sources: list[str] | None = None,
     ) -> None:
         """Initialize a FlowDocument with raw bytes content.
 
@@ -88,7 +88,12 @@ class FlowDocument(Document):
         """
         if type(self) is FlowDocument:
             raise TypeError("Cannot instantiate abstract FlowDocument class directly")
-        super().__init__(name=name, content=content, description=description, sources=sources)
+
+        # Only pass sources if not None to let Pydantic's default_factory handle it
+        if sources is not None:
+            super().__init__(name=name, content=content, description=description, sources=sources)
+        else:
+            super().__init__(name=name, content=content, description=description)
 
     @final
     def get_base_type(self) -> Literal["flow"]:
ai_pipeline_core/documents/task_document.py

@@ -51,7 +51,7 @@ class TaskDocument(Document):
         name: str,
         content: bytes,
         description: str | None = None,
-        sources: list[str] = [],
+        sources: list[str] | None = None,
     ) -> None:
         """Initialize a TaskDocument with raw bytes content.
 
@@ -93,7 +93,12 @@ class TaskDocument(Document):
         """
         if type(self) is TaskDocument:
             raise TypeError("Cannot instantiate abstract TaskDocument class directly")
-        super().__init__(name=name, content=content, description=description, sources=sources)
+
+        # Only pass sources if not None to let Pydantic's default_factory handle it
+        if sources is not None:
+            super().__init__(name=name, content=content, description=description, sources=sources)
+        else:
+            super().__init__(name=name, content=content, description=description)
 
     @final
     def get_base_type(self) -> Literal["task"]:
ai_pipeline_core/llm/ai_messages.py

@@ -279,8 +279,8 @@ class AIMessages(list[AIMessageType]):
         for message in self:
             if isinstance(message, Document):
                 serialized_document = message.serialize_model()
-                del serialized_document["content"]
-                messages.append(json.dumps(serialized_document, indent=2))
+                filtered_doc = {k: v for k, v in serialized_document.items() if k != "content"}
+                messages.append(json.dumps(filtered_doc, indent=2))
             elif isinstance(message, ModelResponse):
                 messages.append(message.content)
             else:
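
Replacing del serialized_document["content"] with a comprehension builds a new dict instead of mutating the one returned by serialize_model(), which matters if that dict is reused or cached elsewhere. A two-dict sketch of the difference:

    doc = {"name": "report.md", "content": "...", "sources": []}

    # Old approach: del mutates the original dict in place.
    del doc["content"]  # "content" is now gone from doc itself

    doc = {"name": "report.md", "content": "...", "sources": []}

    # New approach: filter into a fresh dict; the original stays intact.
    filtered = {k: v for k, v in doc.items() if k != "content"}
    assert "content" in doc and "content" not in filtered
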
ai_pipeline_core/llm/client.py

@@ -37,7 +37,7 @@ def _process_messages(
     context: AIMessages,
     messages: AIMessages,
     system_prompt: str | None = None,
-    cache_ttl: str | None = "120s",
+    cache_ttl: str | None = "5m",
 ) -> list[ChatCompletionMessageParam]:
     """Process and format messages for LLM API consumption.
 
@@ -245,7 +245,7 @@ async def generate(
         model: Model to use (e.g., "gpt-5", "gemini-2.5-pro", "grok-4").
             Accepts predefined models or any string for custom models.
         context: Static context to cache (documents, examples, instructions).
-            Defaults to None (empty context). Cached for 120 seconds.
+            Defaults to None (empty context). Cached for 5 minutes by default.
         messages: Dynamic messages/queries. AIMessages or str ONLY.
             Do not pass Document or DocumentList directly.
             If string, converted to AIMessages internally.
@@ -338,13 +338,13 @@ async def generate(
         - Context caching saves ~50-90% tokens on repeated calls
         - First call: full token cost
         - Subsequent calls (within cache TTL): only messages tokens
-        - Default cache TTL is 120s (production-optimized)
+        - Default cache TTL is 5m (production-optimized)
         - Default retry logic: 3 attempts with 10s delay (production-optimized)
 
     Caching:
         When enabled in your LiteLLM proxy and supported by the upstream provider,
         context messages may be cached to reduce token usage on repeated calls.
-        Default TTL is 120s (optimized for production workloads). Configure caching
+        Default TTL is 5m (optimized for production workloads). Configure caching
         behavior centrally via your LiteLLM proxy settings, not per API call.
         Savings depend on provider and payload; treat this as an optimization, not a guarantee.
 
@@ -447,10 +447,11 @@ async def generate_structured(
             Defaults to None (empty AIMessages).
         messages: Dynamic prompts/queries. AIMessages or str ONLY.
             Do not pass Document or DocumentList directly.
-        options: DEPRECATED - DO NOT USE. Reserved for internal framework usage only.
-            Framework defaults are production-optimized. Configure model behavior
-            centrally via LiteLLM proxy settings, not per API call.
-            The response_format is set automatically from the response_format parameter.
+        options: Optional ModelOptions for configuring temperature, retries, etc.
+            If provided, it will NOT be mutated (a copy is created internally).
+            The response_format field is set automatically from the response_format parameter.
+            In most cases, leave as None to use framework defaults.
+            Configure model behavior centrally via LiteLLM proxy settings when possible.
 
     VISION/PDF MODEL COMPATIBILITY:
         When using Documents with images/PDFs in structured output:
@@ -518,6 +519,9 @@ async def generate_structured(
         context = AIMessages()
     if options is None:
         options = ModelOptions()
+    else:
+        # Create a copy to avoid mutating the caller's options object
+        options = options.model_copy()
 
     options.response_format = response_format
 
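
model_copy() is Pydantic v2's copy API for model instances; copying before assigning response_format is what keeps the caller's ModelOptions untouched, as the updated docstring promises. A minimal sketch, assuming the simplified two-field model below:

    from pydantic import BaseModel

    class ModelOptions(BaseModel):
        temperature: float | None = None
        response_format: type[BaseModel] | None = None

    class Answer(BaseModel):
        text: str

    caller_opts = ModelOptions(temperature=0.7)

    # Copy first, then mutate only the copy.
    opts = caller_opts.model_copy()
    opts.response_format = Answer

    assert caller_opts.response_format is None  # caller's object untouched
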
ai_pipeline_core/llm/model_options.py

@@ -10,7 +10,7 @@ from pydantic import BaseModel
 
 
 class ModelOptions(BaseModel):
-    """Configuration options for LLM generation requests.
+    r"""Configuration options for LLM generation requests.
 
     ModelOptions encapsulates all configuration parameters for model
     generation, including model behavior settings, retry logic, and
@@ -45,7 +45,7 @@ class ModelOptions(BaseModel):
 
         timeout: Maximum seconds to wait for response (default: 300).
 
-        cache_ttl: Cache TTL for context messages (default: "120s").
+        cache_ttl: Cache TTL for context messages (default: "5m").
             String format like "60s", "5m", or None to disable caching.
             Applied to the last context message for efficient token reuse.
 
@@ -62,11 +62,37 @@ class ModelOptions(BaseModel):
         max_completion_tokens: Maximum tokens to generate.
             None uses model default.
 
+        stop: Stop sequences that halt generation when encountered.
+            Can be a single string or list of strings.
+            When the model generates any of these sequences, it stops immediately.
+            Maximum of 4 stop sequences supported by most providers.
+
         response_format: Pydantic model class for structured output.
             Pass a Pydantic model; the client converts it to JSON Schema.
             Set automatically by generate_structured().
             Structured output support varies by provider and model.
 
+        verbosity: Controls output verbosity for models that support it.
+            Literal["low", "medium", "high"] | None
+            "low": Minimal output
+            "medium": Standard output
+            "high": Detailed output
+            Note: Only some models support verbosity control.
+
+        usage_tracking: Enable token usage tracking in API responses (default: True).
+            When enabled, adds {"usage": {"include": True}} to extra_body.
+            Disable for providers that don't support usage tracking.
+
+        user: User identifier for cost tracking and monitoring.
+            A unique identifier representing the end-user, which can help track costs
+            and detect abuse. Maximum length is typically 256 characters.
+            Useful for multi-tenant applications or per-user billing.
+
+        extra_body: Additional provider-specific parameters to pass in request body.
+            Dictionary of custom parameters not covered by standard options.
+            Merged with usage_tracking if both are set.
+            Useful for beta features or provider-specific capabilities.
+
     Example:
         >>> # Basic configuration
         >>> options = ModelOptions(
@@ -103,13 +129,35 @@ class ModelOptions(BaseModel):
         ...     reasoning_effort="high",  # Deep reasoning
         ...     timeout=600  # More time for complex reasoning
         ... )
+        >>>
+        >>> # With stop sequences
+        >>> options = ModelOptions(
+        ...     stop=["STOP", "END", "\n\n"],  # Stop on these sequences
+        ...     temperature=0.7
+        ... )
+        >>>
+        >>> # With custom extra_body parameters
+        >>> options = ModelOptions(
+        ...     extra_body={"custom_param": "value", "beta_feature": True},
+        ...     usage_tracking=True  # Still tracks usage alongside custom params
+        ... )
+        >>>
+        >>> # With user tracking for cost monitoring
+        >>> options = ModelOptions(
+        ...     user="user_12345",  # Track costs per user
+        ...     temperature=0.7
+        ... )
 
     Note:
         - Not all options apply to all models
        - search_context_size only works with search models
        - reasoning_effort only works with models that support explicit reasoning
        - response_format is set internally by generate_structured()
-        - cache_ttl accepts formats like "120s", "5m", "1h" or None to disable caching
+        - cache_ttl accepts formats like "120s", "5m" (default), "1h" or None to disable caching
+        - stop sequences are limited to 4 by most providers
+        - user identifier helps track costs per end-user (max 256 chars)
+        - extra_body allows passing provider-specific parameters
+        - usage_tracking is enabled by default for cost monitoring
     """
 
     temperature: float | None = None
@@ -118,11 +166,16 @@ class ModelOptions(BaseModel):
     reasoning_effort: Literal["low", "medium", "high"] | None = None
     retries: int = 3
     retry_delay_seconds: int = 10
-    timeout: int = 300
-    cache_ttl: str | None = "120s"
+    timeout: int = 600
+    cache_ttl: str | None = "5m"
     service_tier: Literal["auto", "default", "flex", "scale", "priority"] | None = None
     max_completion_tokens: int | None = None
+    stop: str | list[str] | None = None
     response_format: type[BaseModel] | None = None
+    verbosity: Literal["low", "medium", "high"] | None = None
+    usage_tracking: bool = True
+    user: str | None = None
+    extra_body: dict[str, Any] | None = None
 
     def to_openai_completion_kwargs(self) -> dict[str, Any]:
         """Convert options to OpenAI API completion parameters.
@@ -140,10 +193,14 @@ class ModelOptions(BaseModel):
         API parameter mapping:
         - temperature -> temperature
         - max_completion_tokens -> max_completion_tokens
+        - stop -> stop (string or list of strings)
         - reasoning_effort -> reasoning_effort
         - search_context_size -> web_search_options.search_context_size
         - response_format -> response_format
         - service_tier -> service_tier
+        - verbosity -> verbosity
+        - user -> user (for cost tracking)
+        - extra_body -> extra_body (merged with usage tracking)
 
         Web Search Structure:
             When search_context_size is set, creates:
@@ -163,17 +220,21 @@ class ModelOptions(BaseModel):
         """
         kwargs: dict[str, Any] = {
             "timeout": self.timeout,
-            "extra_body": {
-                "usage": {"include": True},  # For openrouter cost tracking
-            },
+            "extra_body": {},
         }
 
+        if self.extra_body:
+            kwargs["extra_body"] = self.extra_body
+
         if self.temperature:
             kwargs["temperature"] = self.temperature
 
         if self.max_completion_tokens:
             kwargs["max_completion_tokens"] = self.max_completion_tokens
 
+        if self.stop:
+            kwargs["stop"] = self.stop
+
         if self.reasoning_effort:
             kwargs["reasoning_effort"] = self.reasoning_effort
 
@@ -186,4 +247,13 @@ class ModelOptions(BaseModel):
         if self.service_tier:
             kwargs["service_tier"] = self.service_tier
 
+        if self.verbosity:
+            kwargs["verbosity"] = self.verbosity
+
+        if self.user:
+            kwargs["user"] = self.user
+
+        if self.usage_tracking:
+            kwargs["extra_body"]["usage"] = {"include": True}
+
         return kwargs
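
With the new fields wired in, to_openai_completion_kwargs() seeds extra_body from the user-supplied dict when one is set and merges the usage entry in afterwards, so usage_tracking=True wins over a conflicting extra_body["usage"]. A rough sketch of the expected result for one combination of options (values assumed, not exhaustive):

    options = ModelOptions(
        temperature=0.7,
        stop=["END"],
        user="user_12345",
        extra_body={"beta_feature": True},
        usage_tracking=True,
    )
    kwargs = options.to_openai_completion_kwargs()
    # kwargs == {
    #     "timeout": 600,  # new default
    #     "extra_body": {"beta_feature": True, "usage": {"include": True}},
    #     "temperature": 0.7,
    #     "stop": ["END"],
    #     "user": "user_12345",
    # }
    # Note: the usage entry is written into the same dict object that was
    # passed as extra_body, so the caller's dict gains a "usage" key too.
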
ai_pipeline_core/llm/model_response.py

@@ -110,7 +110,8 @@ class ModelResponse(ChatCompletion):
             >>> if "error" in response.content.lower():
             ...     # Handle error case
         """
-        return self.choices[0].message.content or ""
+        content = self.choices[0].message.content or ""
+        return content.split("</think>")[-1].strip()
 
     def set_model_options(self, options: dict[str, Any]) -> None:
         """Store the model configuration used for generation.
ai_pipeline_core/tracing.py

@@ -15,7 +15,7 @@ from functools import wraps
 from typing import Any, Callable, Literal, ParamSpec, TypeVar, cast, overload
 
 from lmnr import Attributes, Instruments, Laminar, observe
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
 
 # Import for document trimming - needed for isinstance checks
 # These are lazy imports only used when trim_documents is enabled
@@ -226,8 +226,8 @@ class TraceInfo(BaseModel):
 
     session_id: str | None = None
     user_id: str | None = None
-    metadata: dict[str, str] = {}
-    tags: list[str] = []
+    metadata: dict[str, str] = Field(default_factory=dict)
+    tags: list[str] = Field(default_factory=list)
 
     def get_observe_kwargs(self) -> dict[str, Any]:
         """Convert TraceInfo to kwargs for Laminar's observe decorator.
@@ -502,11 +502,10 @@ def trace(
         observe_name = name or f.__name__
         _observe = observe
 
-        # Store the new parameters
         _session_id = session_id
         _user_id = user_id
-        _metadata = metadata or {}
-        _tags = tags or []
+        _metadata = metadata if metadata is not None else {}
+        _tags = tags if tags is not None else []
         _span_type = span_type
         _ignore_input = ignore_input
         _ignore_output = ignore_output
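
Switching from "metadata or {}" to an explicit is-not-None check changes behavior only for falsy-but-valid arguments: "or" silently swaps a caller's empty dict or list for a brand-new one, while the None test keeps the exact object the caller passed. A two-assertion illustration:

    shared: dict[str, str] = {}
    assert (shared or {}) is not shared                      # `or` discards the empty dict
    assert (shared if shared is not None else {}) is shared  # None check preserves identity
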
ai_pipeline_core-0.2.1.dist-info/METADATA → ai_pipeline_core-0.2.3.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ai-pipeline-core
-Version: 0.2.1
+Version: 0.2.3
 Summary: Core utilities for AI-powered processing pipelines using prefect
 Project-URL: Homepage, https://github.com/bbarwik/ai-pipeline-core
 Project-URL: Repository, https://github.com/bbarwik/ai-pipeline-core
@@ -20,12 +20,12 @@ Classifier: Typing :: Typed
 Requires-Python: >=3.12
 Requires-Dist: httpx>=0.28.1
 Requires-Dist: jinja2>=3.1.6
-Requires-Dist: lmnr>=0.7.13
-Requires-Dist: openai>=1.108.1
+Requires-Dist: lmnr>=0.7.17
+Requires-Dist: openai>=1.109.1
 Requires-Dist: prefect-gcp[cloud-storage]>=0.6.10
-Requires-Dist: prefect>=3.4.19
+Requires-Dist: prefect>=3.4.21
 Requires-Dist: pydantic-settings>=2.10.1
-Requires-Dist: pydantic>=2.11.7
+Requires-Dist: pydantic>=2.11.9
 Requires-Dist: python-magic>=0.4.27
 Requires-Dist: ruamel-yaml>=0.18.14
 Requires-Dist: tiktoken>=0.11.0
ai_pipeline_core-0.2.1.dist-info/RECORD → ai_pipeline_core-0.2.3.dist-info/RECORD

@@ -1,27 +1,27 @@
-ai_pipeline_core/__init__.py,sha256=xTgroRQcXnKqsZSr8XQp-Q7R8gzdIBppVYPxYh5c5uo,5720
+ai_pipeline_core/__init__.py,sha256=Zjw9zKJEEiDkKt8xyQbKnkwspyrGcmG2KVxcDAAkNik,5720
 ai_pipeline_core/exceptions.py,sha256=vx-XLTw2fJSPs-vwtXVYtqoQUcOc0JeI7UmHqRqQYWU,1569
 ai_pipeline_core/pipeline.py,sha256=_00Qctqd7QibyXaetZv6KfyWoW9KZIRdndkYItNHWWI,28921
 ai_pipeline_core/prefect.py,sha256=91ZgLJHsDsRUW77CpNmkKxYs3RCJuucPM3pjKmNBeDg,2199
 ai_pipeline_core/prompt_manager.py,sha256=FAtb1yK7bGuAeuIJ523LOX9bd7TrcHG-TqZ7Lz4RJC0,12087
 ai_pipeline_core/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_pipeline_core/settings.py,sha256=-a9jVGg77xifj2SagCR9shXfzXUd-2MlrlquEu4htG8,5035
-ai_pipeline_core/tracing.py,sha256=9RaJaAX5Vp2C8t73TaY-a9gpVy6a_VtSY0JPohIoQsc,31460
+ai_pipeline_core/tracing.py,sha256=mmK64s1lw18EE_7PQgfZb0sJhAuhkVDxXw_wBpR7UGE,31530
 ai_pipeline_core/documents/__init__.py,sha256=WHStvGZiSyybOcMTYxSV24U6MA3Am_0_Az5p-DuMFrk,738
-ai_pipeline_core/documents/document.py,sha256=L3S_bfOiViMZLYRcmbV4-s3qO8HoGmqJ5g3bXNVs_3Q,67082
+ai_pipeline_core/documents/document.py,sha256=LVUJXr3TyFHQZalWNuoHzFpoVAch3AEZoKxyh27qVAY,67361
 ai_pipeline_core/documents/document_list.py,sha256=Y_NCjfM_CjkIwHRD2iyGgYBuIykN8lT2IIH_uWOiGis,16254
-ai_pipeline_core/documents/flow_document.py,sha256=g9wlRJRJgy4RsrrZ_P5Qu6kj0FuUFfhfUsRFgtq4NIM,3918
+ai_pipeline_core/documents/flow_document.py,sha256=vSPzE4kGuDjGUfFykfpPaSfMuIO9_kDfTvdc8kZaE8U,4144
 ai_pipeline_core/documents/mime_type.py,sha256=DkW88K95el5nAmhC00XLS0G3WpDXgs5IRsBWbKiqG3Y,7995
-ai_pipeline_core/documents/task_document.py,sha256=40tFavBLX3FhK9-CRsuOH-3gUZ0zvEkqv9XcMFr8ySk,4077
+ai_pipeline_core/documents/task_document.py,sha256=4j94N-hkqXVmzjyUjbA9YW2oR4dqnOhqA3D5OWrmGkw,4303
 ai_pipeline_core/documents/temporary_document.py,sha256=Sam344Mm5AlZTm3_l01YdDWeF26F6pR2tytGRL1doQY,2711
 ai_pipeline_core/documents/utils.py,sha256=ZyJNjFN7ihWno0K7dJZed7twYmmPLA0z40UzFw1A3A8,5465
 ai_pipeline_core/flow/__init__.py,sha256=2BfWYMOPYW5teGzwo-qzpn_bom1lxxry0bPsjVgcsCk,188
 ai_pipeline_core/flow/config.py,sha256=3PCDph2n8dj-txqAvd9Wflbi_6lmfXFR9rUhM-szGSQ,18887
 ai_pipeline_core/flow/options.py,sha256=2rKR2GifhXcyw8avI_oiEDMLC2jm5Qzpw8z56pbxUMo,2285
 ai_pipeline_core/llm/__init__.py,sha256=3B_vtEzxrzidP1qOUNQ4RxlUmxZ2MBKQcUhQiTybM9g,661
-ai_pipeline_core/llm/ai_messages.py,sha256=ML4rSCCEEu9_83Mnfn7r4yx0pUkarvnBsrxRZbO4ulw,13126
-ai_pipeline_core/llm/client.py,sha256=3nN_QTW0R64PEvZsg9RxsYnxyq_hvYHYj-nBYG8KeDA,22773
-ai_pipeline_core/llm/model_options.py,sha256=UFuI2drXNamA1G7OSwGHeXbOrkf2gGy2jpMpZEfK9i4,7789
-ai_pipeline_core/llm/model_response.py,sha256=xKJPsqFHtOGfqpKlsGzyBHPbqjEjNfP-Ix3lGVdiTjQ,15289
+ai_pipeline_core/llm/ai_messages.py,sha256=dNhVsVVe_tL8IcSficsyEycW6Ibp3ICFMKj8Syb_h-U,13164
+ai_pipeline_core/llm/client.py,sha256=b-c7WTfFlokrZibsdbuj82L4V-lzff330gUH4RTybHo,22988
+ai_pipeline_core/llm/model_options.py,sha256=cfgxCqhaYwxPt_mwACslsFt3XBSEufUkICU7x1Q5ZzQ,10905
+ai_pipeline_core/llm/model_response.py,sha256=iNSKobR3gzZ-CSC8hz8-grgL7jdd2IcnCSX0exdlg7o,15345
 ai_pipeline_core/llm/model_types.py,sha256=2J4Qsb1x21I4eo_VPeaMMOW8shOGPqzJuoGjTLcBFPM,2791
 ai_pipeline_core/logging/__init__.py,sha256=Nz6-ghAoENsgNmLD2ma9TW9M0U2_QfxuQ5DDW6Vt6M0,651
 ai_pipeline_core/logging/logging.yml,sha256=YTW48keO_K5bkkb-KXGM7ZuaYKiquLsjsURei8Ql0V4,1353
@@ -32,7 +32,7 @@ ai_pipeline_core/simple_runner/cli.py,sha256=yVyuxLY2RZvdNwmwT5LCe-km2nQJzWTPI0v
 ai_pipeline_core/simple_runner/simple_runner.py,sha256=f6cIodYkul-Apu1d63T6kR5DZpiaCWpphUcEPp5XjFo,9102
 ai_pipeline_core/storage/__init__.py,sha256=tcIkjJ3zPBLCyetwiJDewBvS2sbRJrDlBh3gEsQm08E,184
 ai_pipeline_core/storage/storage.py,sha256=ClMr419Y-eU2RuOjZYd51dC0stWQk28Vb56PvQaoUwc,20007
-ai_pipeline_core-0.2.1.dist-info/METADATA,sha256=OBa_0wjIopzJcEMRMgfkZgeTZ8Vk7kad3QuzM6Rg2x4,15159
-ai_pipeline_core-0.2.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-ai_pipeline_core-0.2.1.dist-info/licenses/LICENSE,sha256=kKj8mfbdWwkyG3U6n7ztB3bAZlEwShTkAsvaY657i3I,1074
-ai_pipeline_core-0.2.1.dist-info/RECORD,,
+ai_pipeline_core-0.2.3.dist-info/METADATA,sha256=83uM7yjSTdk1ssloTwf-uiShUTWps7c2FC0ESsy9VSo,15159
+ai_pipeline_core-0.2.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ai_pipeline_core-0.2.3.dist-info/licenses/LICENSE,sha256=kKj8mfbdWwkyG3U6n7ztB3bAZlEwShTkAsvaY657i3I,1074
+ai_pipeline_core-0.2.3.dist-info/RECORD,,