ai-pipeline-core 0.2.9-py3-none-any.whl → 0.3.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. ai_pipeline_core/__init__.py +32 -5
  2. ai_pipeline_core/debug/__init__.py +26 -0
  3. ai_pipeline_core/debug/config.py +91 -0
  4. ai_pipeline_core/debug/content.py +705 -0
  5. ai_pipeline_core/debug/processor.py +99 -0
  6. ai_pipeline_core/debug/summary.py +236 -0
  7. ai_pipeline_core/debug/writer.py +913 -0
  8. ai_pipeline_core/deployment/__init__.py +46 -0
  9. ai_pipeline_core/deployment/base.py +681 -0
  10. ai_pipeline_core/deployment/contract.py +84 -0
  11. ai_pipeline_core/deployment/helpers.py +98 -0
  12. ai_pipeline_core/documents/flow_document.py +1 -1
  13. ai_pipeline_core/documents/task_document.py +1 -1
  14. ai_pipeline_core/documents/temporary_document.py +1 -1
  15. ai_pipeline_core/flow/config.py +13 -2
  16. ai_pipeline_core/flow/options.py +4 -4
  17. ai_pipeline_core/images/__init__.py +362 -0
  18. ai_pipeline_core/images/_processing.py +157 -0
  19. ai_pipeline_core/llm/ai_messages.py +25 -4
  20. ai_pipeline_core/llm/client.py +15 -19
  21. ai_pipeline_core/llm/model_response.py +5 -5
  22. ai_pipeline_core/llm/model_types.py +10 -13
  23. ai_pipeline_core/logging/logging_mixin.py +2 -2
  24. ai_pipeline_core/pipeline.py +1 -1
  25. ai_pipeline_core/progress.py +127 -0
  26. ai_pipeline_core/prompt_builder/__init__.py +5 -0
  27. ai_pipeline_core/prompt_builder/documents_prompt.jinja2 +23 -0
  28. ai_pipeline_core/prompt_builder/global_cache.py +78 -0
  29. ai_pipeline_core/prompt_builder/new_core_documents_prompt.jinja2 +6 -0
  30. ai_pipeline_core/prompt_builder/prompt_builder.py +253 -0
  31. ai_pipeline_core/prompt_builder/system_prompt.jinja2 +41 -0
  32. ai_pipeline_core/tracing.py +54 -2
  33. ai_pipeline_core/utils/deploy.py +214 -6
  34. ai_pipeline_core/utils/remote_deployment.py +37 -187
  35. {ai_pipeline_core-0.2.9.dist-info → ai_pipeline_core-0.3.3.dist-info}/METADATA +96 -27
  36. ai_pipeline_core-0.3.3.dist-info/RECORD +57 -0
  37. {ai_pipeline_core-0.2.9.dist-info → ai_pipeline_core-0.3.3.dist-info}/WHEEL +1 -1
  38. ai_pipeline_core/simple_runner/__init__.py +0 -14
  39. ai_pipeline_core/simple_runner/cli.py +0 -254
  40. ai_pipeline_core/simple_runner/simple_runner.py +0 -247
  41. ai_pipeline_core-0.2.9.dist-info/RECORD +0 -41
  42. {ai_pipeline_core-0.2.9.dist-info → ai_pipeline_core-0.3.3.dist-info}/licenses/LICENSE +0 -0
ai_pipeline_core/__init__.py
@@ -59,7 +59,7 @@ Quick Start:
 ... ) -> DocumentList:
 ...     # Messages accept AIMessages or str. Wrap documents: AIMessages([doc])
 ...     response = await llm.generate(
-...         "gpt-5",
+...         "gpt-5.1",
 ...         messages=AIMessages([documents[0]])
 ...     )
 ...     result = OutputDoc.create(
@@ -82,7 +82,8 @@ Optional Environment Variables:
 - LMNR_DEBUG: Set to "true" to enable debug-level traces
 """
 
-from . import llm
+from . import llm, progress
+from .deployment import DeploymentContext, DeploymentResult, PipelineDeployment
 from .documents import (
     Document,
     DocumentList,
@@ -94,6 +95,15 @@ from .documents import (
     sanitize_url,
 )
 from .flow import FlowConfig, FlowOptions
+from .images import (
+    ImagePart,
+    ImagePreset,
+    ImageProcessingConfig,
+    ImageProcessingError,
+    ProcessedImage,
+    process_image,
+    process_image_to_documents,
+)
 from .llm import (
     AIMessages,
     AIMessageType,
@@ -114,11 +124,13 @@ from .logging import (
 from .logging import get_pipeline_logger as get_logger
 from .pipeline import pipeline_flow, pipeline_task
 from .prefect import disable_run_logger, prefect_test_harness
+from .prompt_builder import EnvironmentVariable, PromptBuilder
 from .prompt_manager import PromptManager
 from .settings import Settings
 from .tracing import TraceInfo, TraceLevel, set_trace_cost, trace
+from .utils.remote_deployment import remote_deployment
 
-__version__ = "0.2.9"
+__version__ = "0.3.3"
 
 __all__ = [
     # Config/Settings
@@ -148,6 +160,12 @@ __all__ = [
     # Prefect decorators (clean, no tracing)
     "prefect_test_harness",
     "disable_run_logger",
+    # Deployment
+    "PipelineDeployment",
+    "DeploymentContext",
+    "DeploymentResult",
+    "remote_deployment",
+    "progress",
     # LLM
     "llm",  # for backward compatibility
     "generate",
@@ -163,8 +181,17 @@ __all__ = [
     "TraceLevel",
     "TraceInfo",
     "set_trace_cost",
+    # Prompt Builder
+    "PromptBuilder",
+    "EnvironmentVariable",
+    # Images
+    "process_image",
+    "process_image_to_documents",
+    "ImagePreset",
+    "ImageProcessingConfig",
+    "ProcessedImage",
+    "ImagePart",
+    "ImageProcessingError",
     # Utils
     "PromptManager",
-    "generate",
-    "generate_structured",
 ]
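
Taken together, the __init__.py changes above re-export the new deployment, images, prompt_builder, progress, and remote_deployment APIs at the package root. The sketch below shows only what this diff confirms: the importable names in 0.3.3. Call signatures are not part of the diff, so no arguments are shown.

# Sketch only: names match the 0.3.3 re-exports above; signatures are assumptions left out.
from ai_pipeline_core import (
    DeploymentContext,
    DeploymentResult,
    EnvironmentVariable,
    ImagePreset,
    PipelineDeployment,
    ProcessedImage,
    PromptBuilder,
    process_image,
    progress,
    remote_deployment,
)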
ai_pipeline_core/debug/__init__.py (new file)
@@ -0,0 +1,26 @@
+"""Local trace debugging system for AI pipelines.
+
+This module provides filesystem-based trace debugging that saves all spans
+with their inputs/outputs for LLM-assisted debugging.
+
+Enable by setting TRACE_DEBUG_PATH environment variable.
+"""
+
+from .config import TraceDebugConfig
+from .content import ArtifactStore, ContentRef, ContentWriter, reconstruct_span_content
+from .processor import LocalDebugSpanProcessor
+from .summary import generate_summary
+from .writer import LocalTraceWriter, TraceState, WriteJob
+
+__all__ = [
+    "TraceDebugConfig",
+    "ContentRef",
+    "ContentWriter",
+    "ArtifactStore",
+    "reconstruct_span_content",
+    "LocalDebugSpanProcessor",
+    "LocalTraceWriter",
+    "TraceState",
+    "WriteJob",
+    "generate_summary",
+]
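
Per the module docstring, the debug subsystem is switched on with the TRACE_DEBUG_PATH environment variable. A minimal sketch, assuming that variable alone enables it and that the directory name is arbitrary:

import os

# Hypothetical path; any writable directory should work per the docstring.
os.environ["TRACE_DEBUG_PATH"] = "./trace_debug"

# These names are exactly what ai_pipeline_core.debug exports in 0.3.3.
from ai_pipeline_core.debug import (  # noqa: E402
    LocalDebugSpanProcessor,
    LocalTraceWriter,
    TraceDebugConfig,
)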
ai_pipeline_core/debug/config.py (new file)
@@ -0,0 +1,91 @@
+"""Configuration for local trace debugging."""
+
+from pathlib import Path
+
+from pydantic import BaseModel, ConfigDict, Field
+
+
+class TraceDebugConfig(BaseModel):
+    """Configuration for local trace debugging.
+
+    Controls how traces are written to the local filesystem for debugging.
+    Enable by setting TRACE_DEBUG_PATH environment variable.
+    """
+
+    model_config = ConfigDict(frozen=True)
+
+    path: Path = Field(description="Directory for debug traces")
+    enabled: bool = Field(default=True, description="Whether debug tracing is enabled")
+
+    # Content size limits (Issue #2)
+    max_file_bytes: int = Field(
+        default=50_000,
+        description="Max bytes for input.yaml or output.yaml. Elements externalized to stay under.",
+    )
+    max_element_bytes: int = Field(
+        default=10_000,
+        description="Max bytes for single element. Above this, partial + artifact ref.",
+    )
+    element_excerpt_bytes: int = Field(
+        default=2_000,
+        description="Bytes of content to keep inline when element exceeds max_element_bytes.",
+    )
+    max_content_bytes: int = Field(
+        default=10_000_000,
+        description="Max bytes for any single artifact. Above this, truncate.",
+    )
+
+    # Image handling (Issue #7 - no changes per user)
+    extract_base64_images: bool = Field(
+        default=True,
+        description="Extract base64 images to artifact files",
+    )
+
+    # Span optimization (Issue #4)
+    merge_wrapper_spans: bool = Field(
+        default=True,
+        description="Merge Prefect wrapper spans with inner traced function spans",
+    )
+
+    # Events (Issue #12)
+    events_file_mode: str = Field(
+        default="errors_only",
+        description="When to write events.yaml: 'all', 'errors_only', 'none'",
+    )
+
+    # Indexes (Issue #1)
+    include_llm_index: bool = Field(
+        default=True,
+        description="Generate _llm_calls.yaml with LLM-specific details",
+    )
+    include_error_index: bool = Field(
+        default=True,
+        description="Generate _errors.yaml with failed span details",
+    )
+
+    # Cleanup
+    max_traces: int | None = Field(
+        default=None,
+        description="Max number of traces to keep. None for unlimited.",
+    )
+
+    # Security - default redaction patterns for common secrets
+    redact_patterns: tuple[str, ...] = Field(
+        default=(
+            r"sk-[a-zA-Z0-9]{20,}",  # OpenAI API keys
+            r"sk-proj-[a-zA-Z0-9\-_]{20,}",  # OpenAI project keys
+            r"AKIA[0-9A-Z]{16}",  # AWS access keys
+            r"ghp_[a-zA-Z0-9]{36}",  # GitHub personal tokens
+            r"gho_[a-zA-Z0-9]{36}",  # GitHub OAuth tokens
+            r"xoxb-[a-zA-Z0-9\-]+",  # Slack bot tokens
+            r"xoxp-[a-zA-Z0-9\-]+",  # Slack user tokens
+            r"(?i)password\s*[:=]\s*['\"]?[^\s'\"]+",  # Passwords
+            r"(?i)secret\s*[:=]\s*['\"]?[^\s'\"]+",  # Secrets
+            r"(?i)api[_\-]?key\s*[:=]\s*['\"]?[^\s'\"]+",  # API keys
+            r"(?i)bearer\s+[a-zA-Z0-9\-_\.]+",  # Bearer tokens
+        ),
+        description="Regex patterns for secrets to redact",
+    )
+
+    # Summary
+    generate_summary: bool = Field(default=True, description="Generate _summary.md")
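
The field names and defaults above are enough to sketch a direct construction of TraceDebugConfig; the override values below are illustrative only.

from pathlib import Path

from ai_pipeline_core.debug import TraceDebugConfig

# Illustrative overrides; defaults come from the model definition above.
config = TraceDebugConfig(
    path=Path("./trace_debug"),
    max_file_bytes=25_000,   # externalize elements sooner than the 50 kB default
    events_file_mode="all",  # also write events.yaml for non-error spans
    max_traces=20,           # prune old traces instead of keeping all
)

# The model is frozen (ConfigDict(frozen=True)), so changing a value requires a new instance.
assert config.enabled is True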