ai-pipeline-core 0.2.8.tar.gz → 0.2.9.tar.gz

This diff compares the contents of two package versions as published to a supported public registry and is provided for informational purposes only.
Files changed (42):
  1. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/PKG-INFO +1 -1
  2. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/__init__.py +1 -1
  3. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/llm/client.py +21 -20
  4. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/llm/model_response.py +6 -3
  5. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/pyproject.toml +2 -2
  6. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/.gitignore +0 -0
  7. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/LICENSE +0 -0
  8. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/README.md +0 -0
  9. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/documents/__init__.py +0 -0
  10. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/documents/document.py +0 -0
  11. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/documents/document_list.py +0 -0
  12. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/documents/flow_document.py +0 -0
  13. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/documents/mime_type.py +0 -0
  14. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/documents/task_document.py +0 -0
  15. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/documents/temporary_document.py +0 -0
  16. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/documents/utils.py +0 -0
  17. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/exceptions.py +0 -0
  18. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/flow/__init__.py +0 -0
  19. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/flow/config.py +0 -0
  20. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/flow/options.py +0 -0
  21. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/llm/__init__.py +0 -0
  22. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/llm/ai_messages.py +0 -0
  23. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/llm/model_options.py +0 -0
  24. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/llm/model_types.py +0 -0
  25. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/logging/__init__.py +0 -0
  26. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/logging/logging.yml +0 -0
  27. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/logging/logging_config.py +0 -0
  28. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/logging/logging_mixin.py +0 -0
  29. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/pipeline.py +0 -0
  30. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/prefect.py +0 -0
  31. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/prompt_manager.py +0 -0
  32. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/py.typed +0 -0
  33. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/settings.py +0 -0
  34. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/simple_runner/__init__.py +0 -0
  35. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/simple_runner/cli.py +0 -0
  36. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/simple_runner/simple_runner.py +0 -0
  37. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/storage/__init__.py +0 -0
  38. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/storage/storage.py +0 -0
  39. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/tracing.py +0 -0
  40. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/utils/__init__.py +0 -0
  41. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/utils/deploy.py +0 -0
  42. {ai_pipeline_core-0.2.8 → ai_pipeline_core-0.2.9}/ai_pipeline_core/utils/remote_deployment.py +0 -0
--- ai_pipeline_core-0.2.8/PKG-INFO
+++ ai_pipeline_core-0.2.9/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ai-pipeline-core
-Version: 0.2.8
+Version: 0.2.9
 Summary: Core utilities for AI-powered processing pipelines using prefect
 Project-URL: Homepage, https://github.com/bbarwik/ai-pipeline-core
 Project-URL: Repository, https://github.com/bbarwik/ai-pipeline-core
--- ai_pipeline_core-0.2.8/ai_pipeline_core/__init__.py
+++ ai_pipeline_core-0.2.9/ai_pipeline_core/__init__.py
@@ -118,7 +118,7 @@ from .prompt_manager import PromptManager
 from .settings import Settings
 from .tracing import TraceInfo, TraceLevel, set_trace_cost, trace
 
-__version__ = "0.2.8"
+__version__ = "0.2.9"
 
 __all__ = [
     # Config/Settings
--- ai_pipeline_core-0.2.8/ai_pipeline_core/llm/client.py
+++ ai_pipeline_core-0.2.9/ai_pipeline_core/llm/client.py
@@ -45,31 +45,30 @@ def _process_messages(
 
     Internal function that combines context and messages into a single
     list of API-compatible messages. Applies caching directives to
-    context messages for efficiency.
+    system prompt and context messages for efficiency.
 
     Args:
        context: Messages to be cached (typically expensive/static content).
        messages: Regular messages without caching (dynamic queries).
        system_prompt: Optional system instructions for the model.
-        cache_ttl: Cache TTL for context messages (e.g. "120s", "300s", "1h").
+        cache_ttl: Cache TTL for system and context messages (e.g. "120s", "300s", "1h").
            Set to None or empty string to disable caching.
 
     Returns:
        List of formatted messages ready for API calls, with:
-        - System prompt at the beginning (if provided)
-        - Context messages with cache_control on the last one (if cache_ttl)
+        - System prompt at the beginning with cache_control (if provided and cache_ttl set)
+        - Context messages with cache_control on all messages (if cache_ttl set)
        - Regular messages without caching
 
     System Prompt Location:
        The system prompt parameter is always injected as the FIRST message
-        with role="system". It is NOT cached with context, allowing dynamic
-        system prompts without breaking cache efficiency.
+        with role="system". It is cached along with context when cache_ttl is set.
 
     Cache behavior:
-        The last context message gets ephemeral caching with specified TTL
+        All system and context messages get ephemeral caching with specified TTL
        to reduce token usage on repeated calls with same context.
        If cache_ttl is None or empty string (falsy), no caching is applied.
-        Only the last context message receives cache_control to maximize efficiency.
+        All system and context messages receive cache_control to maximize cache efficiency.
 
     Note:
        This is an internal function used by _generate_with_retry().
@@ -79,26 +78,28 @@ def _process_messages(
 
     # Add system prompt if provided
     if system_prompt:
-        processed_messages.append({"role": "system", "content": system_prompt})
+        processed_messages.append({
+            "role": "system",
+            "content": [{"type": "text", "text": system_prompt}],
+        })
 
     # Process context messages with caching if provided
     if context:
         # Use AIMessages.to_prompt() for context
         context_messages = context.to_prompt()
+        processed_messages.extend(context_messages)
 
-        # Apply caching to last context message and last content part if cache_ttl is set
-        if cache_ttl:
-            context_messages[-1]["cache_control"] = {  # type: ignore
-                "type": "ephemeral",
-                "ttl": cache_ttl,
-            }
-            assert isinstance(context_messages[-1]["content"], list)  # type: ignore
-            context_messages[-1]["content"][-1]["cache_control"] = {  # type: ignore
+    if cache_ttl:
+        for message in processed_messages:
+            message["cache_control"] = {  # type: ignore
                 "type": "ephemeral",
                 "ttl": cache_ttl,
             }
-
-        processed_messages.extend(context_messages)
+            if isinstance(message["content"], list):  # type: ignore
+                message["content"][-1]["cache_control"] = {  # type: ignore
+                    "type": "ephemeral",
+                    "ttl": cache_ttl,
+                }
 
     # Process regular messages without caching
     if messages:
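The caching rework above is the substantive change in this release: 0.2.8 marked only the last context message (and its last content part), while 0.2.9 marks the system prompt and every system/context message. A minimal runnable sketch of the resulting message shape, using a hypothetical helper (build_cached_messages is not part of the package; the dict layout follows the hunk above):

# Hypothetical helper reproducing the 0.2.9 caching behavior sketched in the
# hunk above; not the package's actual API. It marks every system and context
# message with an ephemeral cache_control entry, and list-style content also
# gets the directive on its last part.
def build_cached_messages(system_prompt, context_messages, cache_ttl="300s"):
    processed = []
    if system_prompt:
        # 0.2.9 switches the system prompt to structured content, so the
        # cache_control directive has a content part to attach to.
        processed.append({
            "role": "system",
            "content": [{"type": "text", "text": system_prompt}],
        })
    processed.extend(context_messages)
    if cache_ttl:
        for message in processed:
            message["cache_control"] = {"type": "ephemeral", "ttl": cache_ttl}
            if isinstance(message["content"], list):
                message["content"][-1]["cache_control"] = {
                    "type": "ephemeral",
                    "ttl": cache_ttl,
                }
    return processed


messages = build_cached_messages(
    "You are a helpful assistant.",
    [{"role": "user", "content": [{"type": "text", "text": "Large reference document..."}]}],
)
assert all("cache_control" in m for m in messages)
# In 0.2.8 only the last context message carried the directive; in 0.2.9
# the system prompt and every context message are marked.

Per the updated docstring, marking all system and context messages is intended to maximize cache efficiency; presumably multiple cache breakpoints let prefix hits survive edits near the end of the context.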
--- ai_pipeline_core-0.2.8/ai_pipeline_core/llm/client.py
+++ ai_pipeline_core-0.2.9/ai_pipeline_core/llm/client.py
@@ -156,7 +157,7 @@ def _model_name_to_openrouter_model(model: ModelName) -> str:
     if model == "grok-4-fast-search":
         return "x-ai/grok-4-fast:online"
     if model == "sonar-pro-search":
-        return "perplexity/sonar-reasoning-pro"
+        return "perplexity/sonar-pro-search"
     if model.startswith("gemini"):
         return f"google/{model}"
     elif model.startswith("gpt"):
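For completeness, a self-contained sketch of the alias mapping this one-line fix corrects (to_openrouter is hypothetical and reproduces only the branches visible in this diff; the package's _model_name_to_openrouter_model covers more model families):

# Sketch limited to the aliases visible in this diff.
def to_openrouter(model: str) -> str:
    if model == "grok-4-fast-search":
        return "x-ai/grok-4-fast:online"
    if model == "sonar-pro-search":
        # 0.2.8 routed this alias to perplexity/sonar-reasoning-pro.
        return "perplexity/sonar-pro-search"
    if model.startswith("gemini"):
        return f"google/{model}"
    raise ValueError(f"alias not covered in this sketch: {model}")


assert to_openrouter("sonar-pro-search") == "perplexity/sonar-pro-search"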
--- ai_pipeline_core-0.2.8/ai_pipeline_core/llm/model_response.py
+++ ai_pipeline_core-0.2.9/ai_pipeline_core/llm/model_response.py
@@ -88,10 +88,13 @@ class ModelResponse(ChatCompletion):
         data = chat_completion.model_dump()
 
         # fixes issue where the role is "assistantassistant" instead of "assistant"
+        valid_finish_reasons = {"stop", "length", "tool_calls", "content_filter", "function_call"}
         for i in range(len(data["choices"])):
-            if role := data["choices"][i]["message"].get("role"):
-                if role.startswith("assistant") and role != "assistant":
-                    data["choices"][i]["message"]["role"] = "assistant"
+            data["choices"][i]["message"]["role"] = "assistant"
+            # Only update finish_reason if it's not already a valid value
+            current_finish_reason = data["choices"][i].get("finish_reason")
+            if current_finish_reason not in valid_finish_reasons:
+                data["choices"][i]["finish_reason"] = "stop"
 
         super().__init__(**data)
 
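A runnable sketch of the normalization this hunk introduces (normalize_choices is a hypothetical standalone version of the logic; in the package it runs inside ModelResponse.__init__ before the payload reaches the ChatCompletion validator):

# Hypothetical standalone version of the 0.2.9 normalization: some providers
# return role "assistantassistant" or a non-standard finish_reason, which the
# upstream ChatCompletion model rejects; this coerces both to valid values.
VALID_FINISH_REASONS = {"stop", "length", "tool_calls", "content_filter", "function_call"}


def normalize_choices(data: dict) -> dict:
    for choice in data["choices"]:
        choice["message"]["role"] = "assistant"
        # Leave valid finish_reason values untouched; only repair bad ones.
        if choice.get("finish_reason") not in VALID_FINISH_REASONS:
            choice["finish_reason"] = "stop"
    return data


raw = {"choices": [{"message": {"role": "assistantassistant", "content": "hi"},
                    "finish_reason": "eos"}]}
fixed = normalize_choices(raw)["choices"][0]
assert fixed["message"]["role"] == "assistant" and fixed["finish_reason"] == "stop"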
--- ai_pipeline_core-0.2.8/pyproject.toml
+++ ai_pipeline_core-0.2.9/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "ai-pipeline-core"
-version = "0.2.8"
+version = "0.2.9"
 description = "Core utilities for AI-powered processing pipelines using prefect"
 readme = "README.md"
 license = {text = "MIT"}
@@ -178,7 +178,7 @@ reportIncompatibleVariableOverride = "error"
 reportMissingParameterType = "warning"
 
 [tool.bumpversion]
-current_version = "0.2.8"
+current_version = "0.2.9"
 commit = true
 tag = true
 tag_name = "v{new_version}"