ai-pipeline-core 0.1.3__tar.gz → 0.1.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/PKG-INFO +3 -3
  2. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/README.md +1 -1
  3. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/__init__.py +1 -1
  4. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/documents/document.py +2 -2
  5. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/llm/ai_messages.py +6 -0
  6. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/llm/client.py +13 -10
  7. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/tracing.py +4 -0
  8. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/pyproject.toml +3 -3
  9. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/.gitignore +0 -0
  10. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/LICENSE +0 -0
  11. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/documents/__init__.py +0 -0
  12. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/documents/document_list.py +0 -0
  13. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/documents/flow_document.py +0 -0
  14. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/documents/mime_type.py +0 -0
  15. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/documents/task_document.py +0 -0
  16. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/documents/utils.py +0 -0
  17. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/exceptions.py +0 -0
  18. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/flow/__init__.py +0 -0
  19. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/flow/config.py +0 -0
  20. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/llm/__init__.py +0 -0
  21. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/llm/model_options.py +0 -0
  22. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/llm/model_response.py +0 -0
  23. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/llm/model_types.py +0 -0
  24. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/logging/__init__.py +0 -0
  25. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/logging/logging.yml +0 -0
  26. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/logging/logging_config.py +0 -0
  27. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/logging/logging_mixin.py +0 -0
  28. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/prompt_manager.py +0 -0
  29. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/py.typed +0 -0
  30. {ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/settings.py +0 -0
{ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ai-pipeline-core
-Version: 0.1.3
+Version: 0.1.5
 Summary: Core utilities for AI-powered processing pipelines using prefect
 Project-URL: Homepage, https://github.com/bbarwik/ai-pipeline-core
 Project-URL: Repository, https://github.com/bbarwik/ai-pipeline-core
@@ -20,7 +20,7 @@ Classifier: Typing :: Typed
 Requires-Python: >=3.12
 Requires-Dist: httpx>=0.28.1
 Requires-Dist: jinja2>=3.1.6
-Requires-Dist: lmnr>=0.7.4
+Requires-Dist: lmnr>=0.7.5
 Requires-Dist: openai>=1.99.9
 Requires-Dist: prefect>=3.4.13
 Requires-Dist: pydantic-settings>=2.10.1
@@ -471,7 +471,7 @@ Built with:
 
 ## Stability Notice
 
-**Current Version**: 0.1.2
+**Current Version**: 0.1.5
 **Status**: Internal Preview
 **API Stability**: Unstable - Breaking changes expected
 **Recommended Use**: Learning and reference only
{ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/README.md

@@ -429,7 +429,7 @@ Built with:
 
 ## Stability Notice
 
-**Current Version**: 0.1.2
+**Current Version**: 0.1.5
 **Status**: Internal Preview
 **API Stability**: Unstable - Breaking changes expected
 **Recommended Use**: Learning and reference only
{ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/__init__.py

@@ -16,7 +16,7 @@ from .prompt_manager import PromptManager
 from .settings import settings
 from .tracing import trace
 
-__version__ = "0.1.2"
+__version__ = "0.1.4"
 
 __all__ = [
     "Document",
{ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/documents/document.py

@@ -25,7 +25,7 @@ from .mime_type import (
 class Document(BaseModel, ABC):
     """Abstract base class for all documents"""
 
-    MAX_CONTENT_SIZE: ClassVar[int] = 10 * 1024 * 1024  # 10MB default
+    MAX_CONTENT_SIZE: ClassVar[int] = 25 * 1024 * 1024  # 25MB default
     DESCRIPTION_EXTENSION: ClassVar[str] = ".description.md"
     MARKDOWN_LIST_SEPARATOR: ClassVar[str] = "\n\n---\n\n"
 
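Since MAX_CONTENT_SIZE is a ClassVar, the new 25MB ceiling is a per-class default rather than a hard cap. A hypothetical sketch of a subclass raising it further (the class name is illustrative, not part of the package):

```python
from typing import ClassVar

from ai_pipeline_core import Document  # exported in __all__ per the __init__.py hunk

# Hypothetical subclass overriding the (now 25MB) default content limit.
# Document is abstract, so a real subclass would also satisfy its ABC contract.
class LargeDatasetDocument(Document):
    MAX_CONTENT_SIZE: ClassVar[int] = 100 * 1024 * 1024  # 100MB
```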
@@ -105,7 +105,7 @@ class Document(BaseModel, ABC):
         except TypeError:
             raise DocumentNameError(f"{cls.__name__}.FILES must be an Enum of string values")
 
-        if name not in allowed:
+        if len(allowed) > 0 and name not in allowed:
             allowed_str = ", ".join(sorted(allowed))
             raise DocumentNameError(f"Invalid filename '{name}'. Allowed names: {allowed_str}")
 
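The reworked check changes what an empty allow-list means: previously a FILES enum with no members rejected every filename, whereas now an empty set disables the check entirely. A minimal sketch of the predicate before and after, where `allowed` stands in for the names collected from FILES:

```python
# `allowed` stands in for the set of names collected from a subclass's FILES enum.
allowed: set[str] = set()
name = "notes.md"

# Before (0.1.3): an empty allow-list failed every lookup, so every name
# raised DocumentNameError.
invalid_before = name not in allowed                      # True

# After (0.1.5): an empty allow-list short-circuits the check, so any name passes.
invalid_after = len(allowed) > 0 and name not in allowed  # False
```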
{ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/llm/ai_messages.py

@@ -1,4 +1,5 @@
 import base64
+import hashlib
 import json
 
 from openai.types.chat import (
@@ -59,6 +60,11 @@ class AIMessages(list[AIMessageType]):
             messages.append(message)
         return messages
 
+    def get_prompt_cache_key(self, system_prompt: str | None = None) -> str:
+        if not system_prompt:
+            system_prompt = ""
+        return hashlib.sha256((system_prompt + json.dumps(self.to_prompt())).encode()).hexdigest()
+
     @staticmethod
     def document_to_prompt(document: Document) -> list[ChatCompletionContentPartParam]:
         """
{ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/llm/client.py

@@ -48,15 +48,13 @@ def _process_messages(
     # Use AIMessages.to_prompt() for context
     context_messages = context.to_prompt()
 
-    # Apply caching to context messages
-    for msg in context_messages:
-        if msg.get("role") == "user":
-            # Add cache control to user messages in context
-            msg["cache_control"] = {  # type: ignore
-                "type": "ephemeral",
-                "ttl": "120s",  # Cache for 2m
-            }
-        processed_messages.append(msg)
+    # Apply caching to last context message
+    context_messages[-1]["cache_control"] = {  # type: ignore
+        "type": "ephemeral",
+        "ttl": "120s",  # Cache for 2m
+    }
+
+    processed_messages.extend(context_messages)
 
     # Process regular messages without caching
     if messages:
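This reverses the earlier strategy of stamping cache_control onto every user message in the context: only the final context message is now marked, which, for providers with Anthropic-style ephemeral caching, caches the whole context prefix as a single unit instead of fragmenting it. A sketch of the resulting shape, with illustrative message contents:

```python
# Illustrative shape only; real entries come from AIMessages.to_prompt().
context_messages = [
    {"role": "user", "content": "background document 1"},
    {"role": "user", "content": "background document 2"},
]

# Only the last context message carries the cache marker; providers that honor
# cache_control cache the prefix up to and including the marked message.
context_messages[-1]["cache_control"] = {"type": "ephemeral", "ttl": "120s"}

processed_messages: list[dict] = []
processed_messages.extend(context_messages)
```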
@@ -108,9 +106,14 @@ async def _generate_with_retry(
         **options.to_openai_completion_kwargs(),
     }
 
+    if context:
+        completion_kwargs["prompt_cache_key"] = context.get_prompt_cache_key(options.system_prompt)
+
     for attempt in range(options.retries):
         try:
-            with Laminar.start_as_current_span(model, span_type="LLM", input=messages) as span:
+            with Laminar.start_as_current_span(
+                model, span_type="LLM", input=processed_messages
+            ) as span:
                 response = await _generate(model, processed_messages, completion_kwargs)
                 span.set_attributes(response.get_laminar_metadata())
                 Laminar.set_span_output(response.content)
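prompt_cache_key here is the OpenAI chat-completions parameter that routes requests carrying the same key toward the same prompt-cache shard, improving hit rates for repeated contexts. A hypothetical direct call showing where the kwarg lands (the real code assembles completion_kwargs and goes through _generate instead):

```python
from openai import AsyncOpenAI

client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set in the environment

async def generate(model: str, processed_messages: list, cache_key: str):
    # prompt_cache_key is passed alongside the normal completion kwargs,
    # just as completion_kwargs["prompt_cache_key"] is in the diff above.
    return await client.chat.completions.create(
        model=model,
        messages=processed_messages,
        prompt_cache_key=cache_key,
    )
```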
{ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/ai_pipeline_core/tracing.py

@@ -103,6 +103,7 @@ def trace(
     ignore_inputs: list[str] | None = None,
     input_formatter: Callable[..., str] | None = None,
     output_formatter: Callable[..., str] | None = None,
+    preserve_global_context: bool = True,
 ) -> Callable[[Callable[P, R]], Callable[P, R]] | Callable[P, R]:
     """Decorator that wires Laminar tracing and observation into a function.
 
@@ -136,6 +137,7 @@
     _ignore_inputs = ignore_inputs
     _input_formatter = input_formatter
     _output_formatter = output_formatter
+    _preserve_global_context = preserve_global_context
 
     # --- Check debug_only flag and environment variable ---
     if debug_only and os.getenv("LMNR_DEBUG", "").lower() != "true":
@@ -173,6 +175,8 @@
         observe_params["input_formatter"] = _input_formatter
     if _output_formatter is not None:
         observe_params["output_formatter"] = _output_formatter
+    if _preserve_global_context:
+        observe_params["preserve_global_context"] = _preserve_global_context
 
     return observe_params
 
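The new preserve_global_context flag defaults to True and is forwarded in the observe_params handed to lmnr's observe, so traced functions presumably keep attaching spans to the ambient global trace context rather than starting detached ones. A hypothetical decorator usage, relying only on the signature shown above:

```python
from ai_pipeline_core import trace

# Hypothetical traced function; preserve_global_context=True (the default)
# keeps its span attached to the surrounding trace context.
@trace(preserve_global_context=True)
async def summarize(text: str) -> str:
    ...

# Passing False does not forward the flag at all (note the
# `if _preserve_global_context:` guard above), so lmnr's own default applies.
@trace(preserve_global_context=False)
async def isolated_step(text: str) -> str:
    ...
```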
{ai_pipeline_core-0.1.3 → ai_pipeline_core-0.1.5}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "ai-pipeline-core"
-version = "0.1.3"
+version = "0.1.5"
 description = "Core utilities for AI-powered processing pipelines using prefect"
 readme = "README.md"
 license = {text = "MIT"}
@@ -22,7 +22,7 @@ classifiers = [
 dependencies = [
     "httpx>=0.28.1",
     "Jinja2>=3.1.6",
-    "lmnr>=0.7.4",
+    "lmnr>=0.7.5",
     "openai>=1.99.9",
     "prefect>=3.4.13",
     "pydantic-settings>=2.10.1",
@@ -140,7 +140,7 @@ reportIncompatibleVariableOverride = "error"
 reportMissingParameterType = "warning"
 
 [tool.bumpversion]
-current_version = "0.1.3"
+current_version = "0.1.5"
 commit = true
 tag = true
 tag_name = "v{new_version}"