ai-pipeline-core 0.2.7__tar.gz → 0.2.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/PKG-INFO +1 -1
  2. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/__init__.py +1 -1
  3. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/flow/options.py +2 -2
  4. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/llm/client.py +35 -1
  5. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/pyproject.toml +2 -2
  6. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/.gitignore +0 -0
  7. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/LICENSE +0 -0
  8. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/README.md +0 -0
  9. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/documents/__init__.py +0 -0
  10. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/documents/document.py +0 -0
  11. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/documents/document_list.py +0 -0
  12. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/documents/flow_document.py +0 -0
  13. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/documents/mime_type.py +0 -0
  14. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/documents/task_document.py +0 -0
  15. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/documents/temporary_document.py +0 -0
  16. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/documents/utils.py +0 -0
  17. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/exceptions.py +0 -0
  18. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/flow/__init__.py +0 -0
  19. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/flow/config.py +0 -0
  20. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/llm/__init__.py +0 -0
  21. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/llm/ai_messages.py +0 -0
  22. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/llm/model_options.py +0 -0
  23. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/llm/model_response.py +0 -0
  24. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/llm/model_types.py +0 -0
  25. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/logging/__init__.py +0 -0
  26. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/logging/logging.yml +0 -0
  27. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/logging/logging_config.py +0 -0
  28. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/logging/logging_mixin.py +0 -0
  29. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/pipeline.py +0 -0
  30. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/prefect.py +0 -0
  31. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/prompt_manager.py +0 -0
  32. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/py.typed +0 -0
  33. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/settings.py +0 -0
  34. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/simple_runner/__init__.py +0 -0
  35. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/simple_runner/cli.py +0 -0
  36. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/simple_runner/simple_runner.py +0 -0
  37. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/storage/__init__.py +0 -0
  38. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/storage/storage.py +0 -0
  39. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/tracing.py +0 -0
  40. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/utils/__init__.py +0 -0
  41. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/utils/deploy.py +0 -0
  42. {ai_pipeline_core-0.2.7 → ai_pipeline_core-0.2.8}/ai_pipeline_core/utils/remote_deployment.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ai-pipeline-core
3
- Version: 0.2.7
3
+ Version: 0.2.8
4
4
  Summary: Core utilities for AI-powered processing pipelines using prefect
5
5
  Project-URL: Homepage, https://github.com/bbarwik/ai-pipeline-core
6
6
  Project-URL: Repository, https://github.com/bbarwik/ai-pipeline-core
@@ -118,7 +118,7 @@ from .prompt_manager import PromptManager
118
118
  from .settings import Settings
119
119
  from .tracing import TraceInfo, TraceLevel, set_trace_cost, trace
120
120
 
121
- __version__ = "0.2.7"
121
+ __version__ = "0.2.8"
122
122
 
123
123
  __all__ = [
124
124
  # Config/Settings
@@ -61,11 +61,11 @@ class FlowOptions(BaseSettings):
61
61
  """
62
62
 
63
63
  core_model: ModelName = Field(
64
- default="gpt-5",
64
+ default="gemini-2.5-pro",
65
65
  description="Primary model for complex analysis and generation tasks.",
66
66
  )
67
67
  small_model: ModelName = Field(
68
- default="gpt-5-mini",
68
+ default="grok-4-fast",
69
69
  description="Fast, cost-effective model for simple tasks and orchestration.",
70
70
  )
71
71
 
@@ -108,6 +108,38 @@ def _process_messages(
108
108
  return processed_messages
109
109
 
110
110
 
111
+ def _remove_cache_control(
112
+ messages: list[ChatCompletionMessageParam],
113
+ ) -> list[ChatCompletionMessageParam]:
114
+ """Remove cache control directives from messages.
115
+
116
+ Internal utility that strips cache_control fields from both message-level
117
+ and content-level entries. Used in retry logic when cache-related errors
118
+ occur during LLM API calls.
119
+
120
+ Args:
121
+ messages: List of messages that may contain cache_control directives.
122
+
123
+ Returns:
124
+ The same message list (modified in-place) with all cache_control
125
+ fields removed from both messages and their content items.
126
+
127
+ Note:
128
+ This function modifies the input list in-place but also returns it
129
+ for convenience. Handles both list-based content (multipart) and
130
+ string content (simple messages).
131
+ """
132
+ for message in messages:
133
+ if content := message.get("content"):
134
+ if isinstance(content, list):
135
+ for item in content:
136
+ if "cache_control" in item:
137
+ del item["cache_control"]
138
+ if "cache_control" in message:
139
+ del message["cache_control"]
140
+ return messages # type: ignore
141
+
142
+
111
143
  def _model_name_to_openrouter_model(model: ModelName) -> str:
112
144
  """Convert a model name to an OpenRouter model name.
113
145
 
@@ -242,7 +274,7 @@ async def _generate_with_retry(
242
274
  if not context and not messages:
243
275
  raise ValueError("Either context or messages must be provided")
244
276
 
245
- if "gemini" in model.lower() and context.approximate_tokens_count < 5000:
277
+ if "gemini" in model.lower() and context.approximate_tokens_count < 10000:
246
278
  # Bug fix for minimum explicit context size for Gemini models
247
279
  options.cache_ttl = None
248
280
 
@@ -272,6 +304,8 @@ async def _generate_with_retry(
272
304
  if not isinstance(e, asyncio.TimeoutError):
273
305
  # disable cache if it's not a timeout because it may cause an error
274
306
  completion_kwargs["extra_body"]["cache"] = {"no-cache": True}
307
+ # sometimes there are issues with cache so cache is removed in case of failure
308
+ processed_messages = _remove_cache_control(processed_messages)
275
309
 
276
310
  logger.warning(
277
311
  f"LLM generation failed (attempt {attempt + 1}/{options.retries}): {e}",
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "ai-pipeline-core"
3
- version = "0.2.7"
3
+ version = "0.2.8"
4
4
  description = "Core utilities for AI-powered processing pipelines using prefect"
5
5
  readme = "README.md"
6
6
  license = {text = "MIT"}
@@ -178,7 +178,7 @@ reportIncompatibleVariableOverride = "error"
178
178
  reportMissingParameterType = "warning"
179
179
 
180
180
  [tool.bumpversion]
181
- current_version = "0.2.7"
181
+ current_version = "0.2.8"
182
182
  commit = true
183
183
  tag = true
184
184
  tag_name = "v{new_version}"