ai-lib-python 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. ai_lib_python/__init__.py +43 -0
  2. ai_lib_python/batch/__init__.py +15 -0
  3. ai_lib_python/batch/collector.py +244 -0
  4. ai_lib_python/batch/executor.py +224 -0
  5. ai_lib_python/cache/__init__.py +26 -0
  6. ai_lib_python/cache/backends.py +380 -0
  7. ai_lib_python/cache/key.py +237 -0
  8. ai_lib_python/cache/manager.py +332 -0
  9. ai_lib_python/client/__init__.py +37 -0
  10. ai_lib_python/client/builder.py +528 -0
  11. ai_lib_python/client/cancel.py +368 -0
  12. ai_lib_python/client/core.py +433 -0
  13. ai_lib_python/client/response.py +134 -0
  14. ai_lib_python/embeddings/__init__.py +36 -0
  15. ai_lib_python/embeddings/client.py +339 -0
  16. ai_lib_python/embeddings/types.py +234 -0
  17. ai_lib_python/embeddings/vectors.py +246 -0
  18. ai_lib_python/errors/__init__.py +41 -0
  19. ai_lib_python/errors/base.py +316 -0
  20. ai_lib_python/errors/classification.py +210 -0
  21. ai_lib_python/guardrails/__init__.py +35 -0
  22. ai_lib_python/guardrails/base.py +336 -0
  23. ai_lib_python/guardrails/filters.py +583 -0
  24. ai_lib_python/guardrails/validators.py +475 -0
  25. ai_lib_python/pipeline/__init__.py +55 -0
  26. ai_lib_python/pipeline/accumulate.py +248 -0
  27. ai_lib_python/pipeline/base.py +240 -0
  28. ai_lib_python/pipeline/decode.py +281 -0
  29. ai_lib_python/pipeline/event_map.py +506 -0
  30. ai_lib_python/pipeline/fan_out.py +284 -0
  31. ai_lib_python/pipeline/select.py +297 -0
  32. ai_lib_python/plugins/__init__.py +32 -0
  33. ai_lib_python/plugins/base.py +294 -0
  34. ai_lib_python/plugins/hooks.py +296 -0
  35. ai_lib_python/plugins/middleware.py +285 -0
  36. ai_lib_python/plugins/registry.py +294 -0
  37. ai_lib_python/protocol/__init__.py +71 -0
  38. ai_lib_python/protocol/loader.py +317 -0
  39. ai_lib_python/protocol/manifest.py +385 -0
  40. ai_lib_python/protocol/validator.py +460 -0
  41. ai_lib_python/py.typed +1 -0
  42. ai_lib_python/resilience/__init__.py +102 -0
  43. ai_lib_python/resilience/backpressure.py +225 -0
  44. ai_lib_python/resilience/circuit_breaker.py +318 -0
  45. ai_lib_python/resilience/executor.py +343 -0
  46. ai_lib_python/resilience/fallback.py +341 -0
  47. ai_lib_python/resilience/preflight.py +413 -0
  48. ai_lib_python/resilience/rate_limiter.py +291 -0
  49. ai_lib_python/resilience/retry.py +299 -0
  50. ai_lib_python/resilience/signals.py +283 -0
  51. ai_lib_python/routing/__init__.py +118 -0
  52. ai_lib_python/routing/manager.py +593 -0
  53. ai_lib_python/routing/strategy.py +345 -0
  54. ai_lib_python/routing/types.py +397 -0
  55. ai_lib_python/structured/__init__.py +33 -0
  56. ai_lib_python/structured/json_mode.py +281 -0
  57. ai_lib_python/structured/schema.py +316 -0
  58. ai_lib_python/structured/validator.py +334 -0
  59. ai_lib_python/telemetry/__init__.py +127 -0
  60. ai_lib_python/telemetry/exporters/__init__.py +9 -0
  61. ai_lib_python/telemetry/exporters/prometheus.py +111 -0
  62. ai_lib_python/telemetry/feedback.py +446 -0
  63. ai_lib_python/telemetry/health.py +409 -0
  64. ai_lib_python/telemetry/logger.py +389 -0
  65. ai_lib_python/telemetry/metrics.py +496 -0
  66. ai_lib_python/telemetry/tracer.py +473 -0
  67. ai_lib_python/tokens/__init__.py +25 -0
  68. ai_lib_python/tokens/counter.py +282 -0
  69. ai_lib_python/tokens/estimator.py +286 -0
  70. ai_lib_python/transport/__init__.py +34 -0
  71. ai_lib_python/transport/auth.py +141 -0
  72. ai_lib_python/transport/http.py +364 -0
  73. ai_lib_python/transport/pool.py +425 -0
  74. ai_lib_python/types/__init__.py +41 -0
  75. ai_lib_python/types/events.py +343 -0
  76. ai_lib_python/types/message.py +332 -0
  77. ai_lib_python/types/tool.py +191 -0
  78. ai_lib_python/utils/__init__.py +21 -0
  79. ai_lib_python/utils/tool_call_assembler.py +317 -0
  80. ai_lib_python-0.5.0.dist-info/METADATA +837 -0
  81. ai_lib_python-0.5.0.dist-info/RECORD +84 -0
  82. ai_lib_python-0.5.0.dist-info/WHEEL +4 -0
  83. ai_lib_python-0.5.0.dist-info/licenses/LICENSE-APACHE +201 -0
  84. ai_lib_python-0.5.0.dist-info/licenses/LICENSE-MIT +21 -0
@@ -0,0 +1,43 @@
+ """
+ ai-lib-python: Official Python Runtime for AI-Protocol
+
+ The canonical Pythonic implementation for unified AI model interaction.
+ Core principle: All logic is operators, all configuration is protocol.
+ """
+
+ from ai_lib_python.client import AiClient, AiClientBuilder, CallStats, ChatResponse
+ from ai_lib_python.errors import AiLibError, ProtocolError, TransportError
+ from ai_lib_python.types.events import StreamingEvent
+ from ai_lib_python.types.message import (
+     ContentBlock,
+     Message,
+     MessageContent,
+     MessageRole,
+ )
+ from ai_lib_python.types.tool import ToolCall, ToolDefinition
+
+ __version__ = "0.5.0"
+
+ __all__ = [
+     # Client
+     "AiClient",
+     "AiClientBuilder",
+     "CallStats",
+     "ChatResponse",
+     # Errors
+     "AiLibError",
+     "ProtocolError",
+     "TransportError",
+     # Types - Events
+     "StreamingEvent",
+     # Types - Message
+     "ContentBlock",
+     "Message",
+     "MessageContent",
+     "MessageRole",
+     # Types - Tool
+     "ToolCall",
+     "ToolDefinition",
+     # Version
+     "__version__",
+ ]
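
A minimal import sketch of the surface re-exported above, assuming only the names this hunk exports (no constructor or method signatures are shown in this file, so none are used):

import ai_lib_python
from ai_lib_python import AiClient, AiLibError, Message, StreamingEvent

# The package root re-exports the core types, so callers do not need the
# deep module paths (ai_lib_python.types.message, etc.).
print(ai_lib_python.__version__)  # -> "0.5.0"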
@@ -0,0 +1,15 @@
+ """
+ Batch processing module for ai-lib-python.
+
+ Provides request batching and batch execution utilities.
+ """
+
+ from ai_lib_python.batch.collector import BatchCollector, BatchConfig
+ from ai_lib_python.batch.executor import BatchExecutor, BatchResult
+
+ __all__ = [
+     "BatchCollector",
+     "BatchConfig",
+     "BatchExecutor",
+     "BatchResult",
+ ]
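
Because both entry points are re-exported here, callers can stay at the subpackage level; a one-line sketch using only the names confirmed above:

from ai_lib_python.batch import BatchCollector, BatchConfig, BatchExecutor, BatchResult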
@@ -0,0 +1,244 @@
+ """
+ Batch collector for grouping requests.
+
+ Collects requests and groups them for batch execution.
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ import time
+ from dataclasses import dataclass, field
+ from typing import TYPE_CHECKING, Any, Generic, TypeVar
+
+ if TYPE_CHECKING:
+     from collections.abc import Awaitable, Callable
+
+ T = TypeVar("T")
+ R = TypeVar("R")
+
+
+ @dataclass
+ class BatchConfig:
+     """Configuration for batch collection.
+
+     Attributes:
+         max_batch_size: Maximum requests per batch
+         max_wait_ms: Maximum wait time before flushing batch
+         group_by: Function to determine grouping key
+     """
+
+     max_batch_size: int = 10
+     max_wait_ms: float = 100.0
+     group_by: Callable[[Any], str] | None = None
+
+     @classmethod
+     def default(cls) -> BatchConfig:
+         """Create default configuration."""
+         return cls()
+
+     @classmethod
+     def for_embeddings(cls) -> BatchConfig:
+         """Create configuration optimized for embeddings."""
+         return cls(
+             max_batch_size=100,
+             max_wait_ms=50.0,
+         )
+
+     @classmethod
+     def for_chat(cls) -> BatchConfig:
+         """Create configuration for chat completions."""
+         return cls(
+             max_batch_size=5,
+             max_wait_ms=10.0,
+         )
+
+
+ @dataclass
+ class PendingRequest(Generic[T]):
+     """A pending request waiting for batch execution.
+
+     Attributes:
+         data: Request data
+         future: Future to resolve with result
+         added_at: Timestamp when added
+         group_key: Grouping key
+     """
+
+     data: T
+     future: asyncio.Future[Any]
+     added_at: float = field(default_factory=time.time)
+     group_key: str = "_default_"
+
+
+ class BatchCollector(Generic[T, R]):
+     """Collects requests for batch processing.
+
+     Accumulates requests until batch size or time limit is reached,
+     then triggers batch execution.
+
+     Example:
+         >>> async def process_batch(items):
+         ...     return [f"result_{i}" for i in range(len(items))]
+         ...
+         >>> collector = BatchCollector(
+         ...     config=BatchConfig(max_batch_size=5),
+         ...     executor=process_batch,
+         ... )
+         >>> result = await collector.add("request1")
+     """
+
+     def __init__(
+         self,
+         config: BatchConfig | None = None,
+         executor: Callable[[list[T]], Awaitable[list[R]]] | None = None,
+     ) -> None:
+         """Initialize batch collector.
+
+         Args:
+             config: Batch configuration
+             executor: Function to execute batches
+         """
+         self._config = config or BatchConfig.default()
+         self._executor = executor
+         self._pending: dict[str, list[PendingRequest[T]]] = {}
+         self._lock = asyncio.Lock()
+         self._timers: dict[str, asyncio.Task[None]] = {}
+         self._running = True
+
+     def set_executor(
+         self, executor: Callable[[list[T]], Awaitable[list[R]]]
+     ) -> None:
+         """Set the batch executor function.
+
+         Args:
+             executor: Function to execute batches
+         """
+         self._executor = executor
+
+     async def add(self, data: T) -> R:
+         """Add a request to the batch.
+
+         Args:
+             data: Request data
+
+         Returns:
+             Result from batch execution
+         """
+         if not self._running:
+             raise RuntimeError("Batch collector is stopped")
+
+         if self._executor is None:
+             raise RuntimeError("No executor set")
+
+         # Determine group key
+         group_key = (
+             self._config.group_by(data) if self._config.group_by else "_default_"
+         )
+
+         # Create future for result on the running loop
+         loop = asyncio.get_running_loop()
+         future: asyncio.Future[R] = loop.create_future()
+
+         async with self._lock:
+             # Add to pending
+             if group_key not in self._pending:
+                 self._pending[group_key] = []
+
+             self._pending[group_key].append(
+                 PendingRequest(data=data, future=future, group_key=group_key)
+             )
+
+             # Check if batch is full
+             if len(self._pending[group_key]) >= self._config.max_batch_size:
+                 await self._flush_group(group_key)
+             else:
+                 # Start timer if not already running
+                 if group_key not in self._timers or self._timers[group_key].done():
+                     self._timers[group_key] = asyncio.create_task(
+                         self._timer_flush(group_key)
+                     )
+
+         return await future
+
+     async def _timer_flush(self, group_key: str) -> None:
+         """Flush group after timeout."""
+         await asyncio.sleep(self._config.max_wait_ms / 1000.0)
+
+         async with self._lock:
+             if self._pending.get(group_key):
+                 await self._flush_group(group_key)
+
+     async def _flush_group(self, group_key: str) -> None:
+         """Flush a specific group.
+
+         Args:
+             group_key: Group to flush
+         """
+         if group_key not in self._pending or not self._pending[group_key]:
+             return
+
+         # Get pending requests
+         requests = self._pending.pop(group_key)
+
+         # Cancel the timer unless it is the task currently running this flush
+         timer = self._timers.pop(group_key, None)
+         if timer is not None and timer is not asyncio.current_task():
+             timer.cancel()
+
+         # Extract data
+         data_list = [r.data for r in requests]
+
+         try:
+             # Execute batch
+             results = await self._executor(data_list)
+
+             # Resolve futures; strict zip raises if the result count mismatches
+             for request, result in zip(requests, results, strict=True):
+                 if not request.future.done():
+                     request.future.set_result(result)
+
+         except Exception as e:
+             # Reject any unresolved futures
+             for request in requests:
+                 if not request.future.done():
+                     request.future.set_exception(e)
+
+     async def flush(self) -> None:
+         """Flush all pending batches."""
+         async with self._lock:
+             for group_key in list(self._pending.keys()):
+                 await self._flush_group(group_key)
+
+     async def stop(self) -> None:
+         """Stop the collector and flush pending requests."""
+         self._running = False
+         await self.flush()
+
+         # Cancel all timers
+         for timer in self._timers.values():
+             timer.cancel()
+         self._timers.clear()
+
+     def get_pending_count(self, group_key: str | None = None) -> int:
+         """Get count of pending requests.
+
+         Args:
+             group_key: Group to count (all if None)
+
+         Returns:
+             Number of pending requests
+         """
+         if group_key:
+             return len(self._pending.get(group_key, []))
+         return sum(len(p) for p in self._pending.values())
+
+     @property
+     def config(self) -> BatchConfig:
+         """Get batch configuration."""
+         return self._config
+
+     @property
+     def is_running(self) -> bool:
+         """Check if collector is running."""
+         return self._running
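
To make the size- and time-based flush semantics concrete, a minimal self-contained sketch built only from the API in this hunk; the uppercasing executor is a stand-in for a real provider batch call:

import asyncio

from ai_lib_python.batch import BatchCollector, BatchConfig


async def main() -> None:
    # Stand-in executor: a real one would hit a provider's batch endpoint.
    async def run_batch(items: list[str]) -> list[str]:
        return [item.upper() for item in items]

    collector: BatchCollector[str, str] = BatchCollector(
        config=BatchConfig(max_batch_size=3, max_wait_ms=20.0),
        executor=run_batch,
    )

    # The third add() fills the batch (max_batch_size=3), so one flush
    # serves all three callers; each awaited add() resolves with its own
    # slot of the batch result.
    results = await asyncio.gather(
        collector.add("alpha"),
        collector.add("beta"),
        collector.add("gamma"),
    )
    print(results)  # ['ALPHA', 'BETA', 'GAMMA']

    await collector.stop()


asyncio.run(main())

Requests that never fill a batch are flushed by the per-group timer instead, so a lone add() resolves after at most roughly max_wait_ms (about 20 ms here).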
@@ -0,0 +1,224 @@
+ """
+ Batch executor for parallel request execution.
+
+ Executes multiple requests concurrently with rate limiting.
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ import time
+ from dataclasses import dataclass, field
+ from typing import TYPE_CHECKING, Generic, TypeVar
+
+ if TYPE_CHECKING:
+     from collections.abc import Awaitable, Callable, Sequence
+
+ T = TypeVar("T")
+ R = TypeVar("R")
+
+
+ @dataclass
+ class BatchResult(Generic[R]):
+     """Result of a batch execution.
+
+     Attributes:
+         results: List of results (None for failed requests)
+         errors: List of errors (None for successful requests)
+         total_time_ms: Total execution time in milliseconds
+         successful_count: Number of successful requests
+         failed_count: Number of failed requests
+     """
+
+     results: list[R | None] = field(default_factory=list)
+     errors: list[Exception | None] = field(default_factory=list)
+     total_time_ms: float = 0.0
+
+     @property
+     def successful_count(self) -> int:
+         """Get count of successful results."""
+         return sum(1 for e in self.errors if e is None)
+
+     @property
+     def failed_count(self) -> int:
+         """Get count of failed results."""
+         return sum(1 for e in self.errors if e is not None)
+
+     @property
+     def all_successful(self) -> bool:
+         """Check if all requests succeeded."""
+         return all(e is None for e in self.errors)
+
+     def get_successful_results(self) -> list[R]:
+         """Get only successful results."""
+         return [r for r, e in zip(self.results, self.errors, strict=False) if e is None and r is not None]
+
+     def get_errors(self) -> list[tuple[int, Exception]]:
+         """Get errors with their indices."""
+         return [(i, e) for i, e in enumerate(self.errors) if e is not None]
+
+
+ class BatchExecutor(Generic[T, R]):
+     """Executes batches of requests concurrently.
+
+     Provides controlled parallel execution with configurable
+     concurrency limits.
+
+     Example:
+         >>> async def call_api(item):
+         ...     return await client.chat().user(item).execute()
+         ...
+         >>> executor = BatchExecutor(call_api, max_concurrent=5)
+         >>> result = await executor.execute(["Q1", "Q2", "Q3"])
+         >>> print(result.successful_count)
+     """
+
+     def __init__(
+         self,
+         operation: Callable[[T], Awaitable[R]],
+         max_concurrent: int = 10,
+         fail_fast: bool = False,
+     ) -> None:
+         """Initialize batch executor.
+
+         Args:
+             operation: Async function to execute for each item
+             max_concurrent: Maximum concurrent operations
+             fail_fast: Stop on first error
+         """
+         self._operation = operation
+         self._max_concurrent = max_concurrent
+         self._fail_fast = fail_fast
+         self._semaphore = asyncio.Semaphore(max_concurrent)
+
+     async def execute(self, items: Sequence[T]) -> BatchResult[R]:
+         """Execute operation for all items.
+
+         Args:
+             items: Items to process
+
+         Returns:
+             BatchResult with results and errors
+         """
+         start_time = time.time()
+         result = BatchResult[R]()
+
+         # Initialize result lists
+         result.results = [None] * len(items)
+         result.errors = [None] * len(items)
+
+         # Create tasks
+         tasks = [
+             asyncio.create_task(self._execute_one(item, idx, result))
+             for idx, item in enumerate(items)
+         ]
+
+         # Execute all tasks
+         if self._fail_fast:
+             # Stop on first error
+             try:
+                 await asyncio.gather(*tasks)
+             except Exception:
+                 # Cancel remaining tasks
+                 for task in tasks:
+                     if not task.done():
+                         task.cancel()
+                 # Wait for cancellation to complete
+                 await asyncio.gather(*tasks, return_exceptions=True)
+         else:
+             # Continue on errors
+             await asyncio.gather(*tasks, return_exceptions=True)
+
+         result.total_time_ms = (time.time() - start_time) * 1000
+         return result
+
+     async def _execute_one(
+         self,
+         item: T,
+         index: int,
+         result: BatchResult[R],
+     ) -> None:
+         """Execute operation for a single item.
+
+         Args:
+             item: Item to process
+             index: Item index
+             result: Result object to update
+         """
+         async with self._semaphore:
+             try:
+                 value = await self._operation(item)
+                 result.results[index] = value
+             except Exception as e:
+                 result.errors[index] = e
+                 if self._fail_fast:
+                     raise
+
+     async def execute_with_progress(
+         self,
+         items: Sequence[T],
+         on_progress: Callable[[int, int], None] | None = None,
+     ) -> BatchResult[R]:
+         """Execute with progress callback.
+
+         Args:
+             items: Items to process
+             on_progress: Callback(completed, total)
+
+         Returns:
+             BatchResult with results and errors
+         """
+         start_time = time.time()
+         result = BatchResult[R]()
+         completed = 0
+         total = len(items)
+
+         # Initialize result lists
+         result.results = [None] * total
+         result.errors = [None] * total
+
+         async def execute_with_callback(item: T, index: int) -> None:
+             nonlocal completed
+             async with self._semaphore:
+                 try:
+                     value = await self._operation(item)
+                     result.results[index] = value
+                 except Exception as e:
+                     result.errors[index] = e
+
+                 completed += 1
+                 if on_progress:
+                     on_progress(completed, total)
+
+         # Execute all tasks
+         tasks = [execute_with_callback(item, idx) for idx, item in enumerate(items)]
+         await asyncio.gather(*tasks, return_exceptions=True)
+
+         result.total_time_ms = (time.time() - start_time) * 1000
+         return result
+
+     @property
+     def max_concurrent(self) -> int:
+         """Get maximum concurrent operations."""
+         return self._max_concurrent
+
+
+ async def batch_execute(
+     items: Sequence[T],
+     operation: Callable[[T], Awaitable[R]],
+     max_concurrent: int = 10,
+     fail_fast: bool = False,
+ ) -> BatchResult[R]:
+     """Convenience function for batch execution.
+
+     Args:
+         items: Items to process
+         operation: Async function for each item
+         max_concurrent: Maximum concurrent operations
+         fail_fast: Stop on first error
+
+     Returns:
+         BatchResult with results and errors
+     """
+     executor = BatchExecutor(operation, max_concurrent, fail_fast)
+     return await executor.execute(items)
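
A small end-to-end sketch of the batch_execute convenience function above; the deliberately failing item shows how per-item errors are captured rather than raised when fail_fast stays False:

import asyncio

from ai_lib_python.batch.executor import batch_execute


async def main() -> None:
    async def square(n: int) -> int:
        if n == 3:
            raise ValueError("boom")  # one deliberate failure
        return n * n

    result = await batch_execute([1, 2, 3, 4], square, max_concurrent=2)
    print(result.successful_count, result.failed_count)  # 3 1
    print(result.get_successful_results())  # [1, 4, 16]
    print(result.get_errors())  # [(2, ValueError('boom'))]


asyncio.run(main())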
@@ -0,0 +1,26 @@
+ """
+ Response caching module for ai-lib-python.
+
+ Provides caching for AI responses with TTL and multiple backends.
+ """
+
+ from ai_lib_python.cache.backends import (
+     CacheBackend,
+     DiskCache,
+     MemoryCache,
+     NullCache,
+ )
+ from ai_lib_python.cache.key import CacheKey, CacheKeyGenerator
+ from ai_lib_python.cache.manager import CacheConfig, CacheManager, CacheStats
+
+ __all__ = [
+     "CacheBackend",
+     "CacheConfig",
+     "CacheKey",
+     "CacheKeyGenerator",
+     "CacheManager",
+     "CacheStats",
+     "DiskCache",
+     "MemoryCache",
+     "NullCache",
+ ]
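
Only the export surface appears in this hunk, so the sketch below sticks to imports; constructor arguments for the backends and manager live in cache/backends.py and cache/manager.py, which are not shown here.

from ai_lib_python.cache import CacheManager, DiskCache, MemoryCache, NullCache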