ai-lib-python 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ai_lib_python/__init__.py +43 -0
- ai_lib_python/batch/__init__.py +15 -0
- ai_lib_python/batch/collector.py +244 -0
- ai_lib_python/batch/executor.py +224 -0
- ai_lib_python/cache/__init__.py +26 -0
- ai_lib_python/cache/backends.py +380 -0
- ai_lib_python/cache/key.py +237 -0
- ai_lib_python/cache/manager.py +332 -0
- ai_lib_python/client/__init__.py +37 -0
- ai_lib_python/client/builder.py +528 -0
- ai_lib_python/client/cancel.py +368 -0
- ai_lib_python/client/core.py +433 -0
- ai_lib_python/client/response.py +134 -0
- ai_lib_python/embeddings/__init__.py +36 -0
- ai_lib_python/embeddings/client.py +339 -0
- ai_lib_python/embeddings/types.py +234 -0
- ai_lib_python/embeddings/vectors.py +246 -0
- ai_lib_python/errors/__init__.py +41 -0
- ai_lib_python/errors/base.py +316 -0
- ai_lib_python/errors/classification.py +210 -0
- ai_lib_python/guardrails/__init__.py +35 -0
- ai_lib_python/guardrails/base.py +336 -0
- ai_lib_python/guardrails/filters.py +583 -0
- ai_lib_python/guardrails/validators.py +475 -0
- ai_lib_python/pipeline/__init__.py +55 -0
- ai_lib_python/pipeline/accumulate.py +248 -0
- ai_lib_python/pipeline/base.py +240 -0
- ai_lib_python/pipeline/decode.py +281 -0
- ai_lib_python/pipeline/event_map.py +506 -0
- ai_lib_python/pipeline/fan_out.py +284 -0
- ai_lib_python/pipeline/select.py +297 -0
- ai_lib_python/plugins/__init__.py +32 -0
- ai_lib_python/plugins/base.py +294 -0
- ai_lib_python/plugins/hooks.py +296 -0
- ai_lib_python/plugins/middleware.py +285 -0
- ai_lib_python/plugins/registry.py +294 -0
- ai_lib_python/protocol/__init__.py +71 -0
- ai_lib_python/protocol/loader.py +317 -0
- ai_lib_python/protocol/manifest.py +385 -0
- ai_lib_python/protocol/validator.py +460 -0
- ai_lib_python/py.typed +1 -0
- ai_lib_python/resilience/__init__.py +102 -0
- ai_lib_python/resilience/backpressure.py +225 -0
- ai_lib_python/resilience/circuit_breaker.py +318 -0
- ai_lib_python/resilience/executor.py +343 -0
- ai_lib_python/resilience/fallback.py +341 -0
- ai_lib_python/resilience/preflight.py +413 -0
- ai_lib_python/resilience/rate_limiter.py +291 -0
- ai_lib_python/resilience/retry.py +299 -0
- ai_lib_python/resilience/signals.py +283 -0
- ai_lib_python/routing/__init__.py +118 -0
- ai_lib_python/routing/manager.py +593 -0
- ai_lib_python/routing/strategy.py +345 -0
- ai_lib_python/routing/types.py +397 -0
- ai_lib_python/structured/__init__.py +33 -0
- ai_lib_python/structured/json_mode.py +281 -0
- ai_lib_python/structured/schema.py +316 -0
- ai_lib_python/structured/validator.py +334 -0
- ai_lib_python/telemetry/__init__.py +127 -0
- ai_lib_python/telemetry/exporters/__init__.py +9 -0
- ai_lib_python/telemetry/exporters/prometheus.py +111 -0
- ai_lib_python/telemetry/feedback.py +446 -0
- ai_lib_python/telemetry/health.py +409 -0
- ai_lib_python/telemetry/logger.py +389 -0
- ai_lib_python/telemetry/metrics.py +496 -0
- ai_lib_python/telemetry/tracer.py +473 -0
- ai_lib_python/tokens/__init__.py +25 -0
- ai_lib_python/tokens/counter.py +282 -0
- ai_lib_python/tokens/estimator.py +286 -0
- ai_lib_python/transport/__init__.py +34 -0
- ai_lib_python/transport/auth.py +141 -0
- ai_lib_python/transport/http.py +364 -0
- ai_lib_python/transport/pool.py +425 -0
- ai_lib_python/types/__init__.py +41 -0
- ai_lib_python/types/events.py +343 -0
- ai_lib_python/types/message.py +332 -0
- ai_lib_python/types/tool.py +191 -0
- ai_lib_python/utils/__init__.py +21 -0
- ai_lib_python/utils/tool_call_assembler.py +317 -0
- ai_lib_python-0.5.0.dist-info/METADATA +837 -0
- ai_lib_python-0.5.0.dist-info/RECORD +84 -0
- ai_lib_python-0.5.0.dist-info/WHEEL +4 -0
- ai_lib_python-0.5.0.dist-info/licenses/LICENSE-APACHE +201 -0
- ai_lib_python-0.5.0.dist-info/licenses/LICENSE-MIT +21 -0
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Vector operations for embeddings.
|
|
3
|
+
|
|
4
|
+
Provides similarity calculations and vector manipulation.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import math
|
|
10
|
+
from typing import TYPE_CHECKING
|
|
11
|
+
|
|
12
|
+
if TYPE_CHECKING:
|
|
13
|
+
from ai_lib_python.embeddings.types import Embedding
|
|
14
|
+
|
|
15
|
+
# Type alias for vectors
|
|
16
|
+
Vector = list[float]
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def dot_product(a: Vector, b: Vector) -> float:
|
|
20
|
+
"""Calculate dot product of two vectors.
|
|
21
|
+
|
|
22
|
+
Args:
|
|
23
|
+
a: First vector
|
|
24
|
+
b: Second vector
|
|
25
|
+
|
|
26
|
+
Returns:
|
|
27
|
+
Dot product value
|
|
28
|
+
|
|
29
|
+
Raises:
|
|
30
|
+
ValueError: If vectors have different dimensions
|
|
31
|
+
"""
|
|
32
|
+
if len(a) != len(b):
|
|
33
|
+
raise ValueError(f"Vector dimensions must match: {len(a)} != {len(b)}")
|
|
34
|
+
|
|
35
|
+
return sum(x * y for x, y in zip(a, b, strict=True))
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def magnitude(v: Vector) -> float:
    """Return the magnitude (L2 norm) of a vector.

    Args:
        v: Input vector

    Returns:
        The Euclidean length; 0.0 for an empty vector
    """
    squared = 0
    for component in v:
        squared += component * component
    return math.sqrt(squared)
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def normalize_vector(v: Vector) -> Vector:
    """Scale a vector to unit length.

    The zero vector has no direction and is returned unchanged
    (the original list object, not a copy).

    Args:
        v: Input vector

    Returns:
        A new unit-length vector, or ``v`` itself when its norm is zero
    """
    norm = math.sqrt(sum(x * x for x in v))
    return v if norm == 0 else [x / norm for x in v]
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def cosine_similarity(a: Vector, b: Vector) -> float:
|
|
66
|
+
"""Calculate cosine similarity between two vectors.
|
|
67
|
+
|
|
68
|
+
Args:
|
|
69
|
+
a: First vector
|
|
70
|
+
b: Second vector
|
|
71
|
+
|
|
72
|
+
Returns:
|
|
73
|
+
Cosine similarity value (-1 to 1)
|
|
74
|
+
|
|
75
|
+
Raises:
|
|
76
|
+
ValueError: If vectors have different dimensions
|
|
77
|
+
"""
|
|
78
|
+
if len(a) != len(b):
|
|
79
|
+
raise ValueError(f"Vector dimensions must match: {len(a)} != {len(b)}")
|
|
80
|
+
|
|
81
|
+
dot = dot_product(a, b)
|
|
82
|
+
mag_a = magnitude(a)
|
|
83
|
+
mag_b = magnitude(b)
|
|
84
|
+
|
|
85
|
+
if mag_a == 0 or mag_b == 0:
|
|
86
|
+
return 0.0
|
|
87
|
+
|
|
88
|
+
return dot / (mag_a * mag_b)
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
def euclidean_distance(a: Vector, b: Vector) -> float:
|
|
92
|
+
"""Calculate Euclidean distance between two vectors.
|
|
93
|
+
|
|
94
|
+
Args:
|
|
95
|
+
a: First vector
|
|
96
|
+
b: Second vector
|
|
97
|
+
|
|
98
|
+
Returns:
|
|
99
|
+
Euclidean distance value
|
|
100
|
+
|
|
101
|
+
Raises:
|
|
102
|
+
ValueError: If vectors have different dimensions
|
|
103
|
+
"""
|
|
104
|
+
if len(a) != len(b):
|
|
105
|
+
raise ValueError(f"Vector dimensions must match: {len(a)} != {len(b)}")
|
|
106
|
+
|
|
107
|
+
return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b, strict=True)))
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def manhattan_distance(a: Vector, b: Vector) -> float:
|
|
111
|
+
"""Calculate Manhattan (L1) distance between two vectors.
|
|
112
|
+
|
|
113
|
+
Args:
|
|
114
|
+
a: First vector
|
|
115
|
+
b: Second vector
|
|
116
|
+
|
|
117
|
+
Returns:
|
|
118
|
+
Manhattan distance value
|
|
119
|
+
|
|
120
|
+
Raises:
|
|
121
|
+
ValueError: If vectors have different dimensions
|
|
122
|
+
"""
|
|
123
|
+
if len(a) != len(b):
|
|
124
|
+
raise ValueError(f"Vector dimensions must match: {len(a)} != {len(b)}")
|
|
125
|
+
|
|
126
|
+
return sum(abs(x - y) for x, y in zip(a, b, strict=True))
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
def find_most_similar(
    query: Vector,
    candidates: list[Vector],
    top_k: int = 5,
    metric: str = "cosine",
) -> list[tuple[int, float]]:
    """Rank candidate vectors by similarity to a query vector.

    Args:
        query: Query vector
        candidates: List of candidate vectors
        top_k: Number of results to return
        metric: Similarity metric ("cosine", "euclidean", "dot")

    Returns:
        List of (index, score) tuples, best matches first

    Raises:
        ValueError: If metric is not supported
    """
    # Resolve the scoring function and its sort direction up front:
    # similarities rank high-to-low, distances low-to-high.
    if metric == "cosine":
        score_fn, descending = cosine_similarity, True
    elif metric == "euclidean":
        score_fn, descending = euclidean_distance, False
    elif metric == "dot":
        score_fn, descending = dot_product, True
    else:
        raise ValueError(f"Unknown metric: {metric}. Use 'cosine', 'euclidean', or 'dot'")

    scores = [(idx, score_fn(query, vec)) for idx, vec in enumerate(candidates)]
    scores.sort(key=lambda pair: pair[1], reverse=descending)
    return scores[:top_k]
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
def find_most_similar_embeddings(
    query: Embedding,
    candidates: list[Embedding],
    top_k: int = 5,
    metric: str = "cosine",
) -> list[tuple[Embedding, float]]:
    """Rank candidate embeddings by similarity to a query embedding.

    Thin wrapper over :func:`find_most_similar` that unwraps the raw
    vectors and maps the result indices back to embedding objects.

    Args:
        query: Query embedding
        candidates: List of candidate embeddings
        top_k: Number of results to return
        metric: Similarity metric

    Returns:
        List of (embedding, score) tuples
    """
    vectors = [candidate.vector for candidate in candidates]
    ranked = find_most_similar(query.vector, vectors, top_k, metric)
    return [(candidates[idx], score) for idx, score in ranked]
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
def average_vectors(vectors: list[Vector]) -> Vector:
    """Compute the component-wise mean of a list of vectors.

    Args:
        vectors: List of vectors

    Returns:
        The average vector

    Raises:
        ValueError: If vectors have different dimensions or list is empty
    """
    if not vectors:
        raise ValueError("Cannot average empty list of vectors")

    dim = len(vectors[0])
    if any(len(vec) != dim for vec in vectors):
        raise ValueError("All vectors must have the same dimensions")

    count = len(vectors)
    return [sum(vec[i] for vec in vectors) / count for i in range(dim)]
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
def weighted_average_vectors(
|
|
213
|
+
vectors: list[Vector],
|
|
214
|
+
weights: list[float],
|
|
215
|
+
) -> Vector:
|
|
216
|
+
"""Calculate weighted average of vectors.
|
|
217
|
+
|
|
218
|
+
Args:
|
|
219
|
+
vectors: List of vectors
|
|
220
|
+
weights: List of weights (should sum to 1, or will be normalized)
|
|
221
|
+
|
|
222
|
+
Returns:
|
|
223
|
+
Weighted average vector
|
|
224
|
+
|
|
225
|
+
Raises:
|
|
226
|
+
ValueError: If vectors/weights mismatch or list is empty
|
|
227
|
+
"""
|
|
228
|
+
if not vectors:
|
|
229
|
+
raise ValueError("Cannot average empty list of vectors")
|
|
230
|
+
if len(vectors) != len(weights):
|
|
231
|
+
raise ValueError("Number of vectors must match number of weights")
|
|
232
|
+
|
|
233
|
+
# Normalize weights
|
|
234
|
+
total_weight = sum(weights)
|
|
235
|
+
if total_weight == 0:
|
|
236
|
+
raise ValueError("Total weight cannot be zero")
|
|
237
|
+
normalized_weights = [w / total_weight for w in weights]
|
|
238
|
+
|
|
239
|
+
dim = len(vectors[0])
|
|
240
|
+
if not all(len(v) == dim for v in vectors):
|
|
241
|
+
raise ValueError("All vectors must have the same dimensions")
|
|
242
|
+
|
|
243
|
+
return [
|
|
244
|
+
sum(v[i] * w for v, w in zip(vectors, normalized_weights, strict=True))
|
|
245
|
+
for i in range(dim)
|
|
246
|
+
]
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Error hierarchy for ai-lib-python.
|
|
3
|
+
|
|
4
|
+
Provides structured error types aligned with AI-Protocol error_classification.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from ai_lib_python.errors.base import (
|
|
8
|
+
AiLibError,
|
|
9
|
+
ErrorContext,
|
|
10
|
+
PipelineError,
|
|
11
|
+
ProtocolError,
|
|
12
|
+
RemoteError,
|
|
13
|
+
TransportError,
|
|
14
|
+
ValidationError,
|
|
15
|
+
)
|
|
16
|
+
from ai_lib_python.errors.base import (
|
|
17
|
+
RuntimeError as AiRuntimeError,
|
|
18
|
+
)
|
|
19
|
+
from ai_lib_python.errors.classification import (
|
|
20
|
+
ErrorClass,
|
|
21
|
+
classify_http_error,
|
|
22
|
+
is_fallbackable,
|
|
23
|
+
is_retryable,
|
|
24
|
+
)
|
|
25
|
+
|
|
26
|
+
__all__ = [
|
|
27
|
+
# Base errors
|
|
28
|
+
"AiLibError",
|
|
29
|
+
"AiRuntimeError",
|
|
30
|
+
# Classification
|
|
31
|
+
"ErrorClass",
|
|
32
|
+
"ErrorContext",
|
|
33
|
+
"PipelineError",
|
|
34
|
+
"ProtocolError",
|
|
35
|
+
"RemoteError",
|
|
36
|
+
"TransportError",
|
|
37
|
+
"ValidationError",
|
|
38
|
+
"classify_http_error",
|
|
39
|
+
"is_fallbackable",
|
|
40
|
+
"is_retryable",
|
|
41
|
+
]
|
|
@@ -0,0 +1,316 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Base error classes for ai-lib-python.
|
|
3
|
+
|
|
4
|
+
Provides a layered error hierarchy:
|
|
5
|
+
- AiLibError: Base class for all library errors
|
|
6
|
+
- ProtocolError: Protocol loading/validation errors
|
|
7
|
+
- TransportError: HTTP/network errors
|
|
8
|
+
- PipelineError: Stream processing errors
|
|
9
|
+
- ValidationError: Request/response validation errors
|
|
10
|
+
- RuntimeError: General runtime errors
|
|
11
|
+
- RemoteError: Remote API errors with classification
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
import contextlib
|
|
17
|
+
from dataclasses import dataclass, field
|
|
18
|
+
from typing import TYPE_CHECKING, Any
|
|
19
|
+
|
|
20
|
+
if TYPE_CHECKING:
|
|
21
|
+
from ai_lib_python.errors.classification import ErrorClass
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass
class ErrorContext:
    """Structured error context for diagnostics.

    Provides actionable information for debugging and error handling.
    """

    field_path: str | None = None
    """Path to the problematic field (e.g., 'messages[0].content')"""

    details: dict[str, Any] = field(default_factory=dict)
    """Additional details about the error"""

    source: str | None = None
    """Error source (e.g., 'protocol', 'transport', 'pipeline')"""

    hint: str | None = None
    """Actionable hint for resolving the error"""

    def __str__(self) -> str:
        # Render only the populated components, space-separated.
        labelled = (
            f"[{self.source}]" if self.source else None,
            f"at '{self.field_path}'" if self.field_path else None,
            f"(hint: {self.hint})" if self.hint else None,
        )
        return " ".join(part for part in labelled if part)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class AiLibError(Exception):
    """Root of the ai-lib-python exception hierarchy.

    Catching this single type catches every error raised by the
    library.

    Attributes:
        message: Human-readable error message
        context: Structured error context (never None; defaults to an
            empty :class:`ErrorContext`)
    """

    def __init__(self, message: str, context: ErrorContext | None = None) -> None:
        self.message = message
        self.context = context or ErrorContext()
        super().__init__(self._format_message())

    def _format_message(self) -> str:
        """Combine the message with any non-empty context description."""
        rendered = str(self.context)
        return f"{self.message} {rendered}" if rendered else self.message

    def with_hint(self, hint: str) -> AiLibError:
        """Attach an actionable hint to the context and return self."""
        self.context.hint = hint
        return self
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class ProtocolError(AiLibError):
    """Raised when a protocol manifest cannot be loaded or parsed.

    Covers missing protocol files, malformed YAML/JSON, schema
    validation failures, and unsupported protocol versions.

    Attributes:
        protocol_path: Path of the offending protocol file, if known
        version: Protocol version involved, if known
    """

    def __init__(
        self,
        message: str,
        context: ErrorContext | None = None,
        *,
        protocol_path: str | None = None,
        version: str | None = None,
    ) -> None:
        ctx = context or ErrorContext(source="protocol")
        # Record whichever identifying details were supplied.
        for key, value in (("protocol_path", protocol_path), ("version", version)):
            if value:
                ctx.details[key] = value
        super().__init__(message, ctx)
        self.protocol_path = protocol_path
        self.version = version
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
class TransportError(AiLibError):
    """Error during HTTP transport.

    Raised for network connection failures, timeouts, SSL/TLS errors,
    and proxy errors.

    Attributes:
        url: Request URL, if known
        status_code: HTTP status code, if a response was received
    """

    def __init__(
        self,
        message: str,
        context: ErrorContext | None = None,
        *,
        url: str | None = None,
        status_code: int | None = None,
        cause: Exception | None = None,
    ) -> None:
        ctx = context or ErrorContext(source="transport")
        if url:
            ctx.details["url"] = url
        if status_code:
            ctx.details["status_code"] = status_code
        super().__init__(message, ctx)
        self.url = url
        self.status_code = status_code
        # Only set __cause__ when a cause was actually provided: assigning
        # __cause__ (even to None) implicitly flips __suppress_context__ to
        # True, which would hide the implicit "During handling of ..."
        # exception chain whenever this error is raised inside an except
        # block without an explicit cause.
        if cause is not None:
            self.__cause__ = cause
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
class PipelineError(AiLibError):
    """Raised when stream pipeline processing fails.

    Covers decoder parse failures, JSONPath evaluation problems, event
    mapping errors, and accumulator state errors.

    Attributes:
        operator: Name of the pipeline operator that failed, if known
    """

    def __init__(
        self,
        message: str,
        context: ErrorContext | None = None,
        *,
        operator: str | None = None,
    ) -> None:
        ctx = context if context is not None else ErrorContext(source="pipeline")
        if operator:
            ctx.details["operator"] = operator
        super().__init__(message, ctx)
        self.operator = operator
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
class ValidationError(AiLibError):
    """Raised when a request or response fails validation.

    Covers invalid request parameters, missing required fields, type
    mismatches, and capability mismatches (e.g., tools not supported).

    Attributes:
        field: Path of the offending field, if known
        expected: Expected value/type, if applicable
        actual: Actual value/type observed, if applicable
    """

    def __init__(
        self,
        message: str,
        context: ErrorContext | None = None,
        *,
        field: str | None = None,
        expected: Any = None,
        actual: Any = None,
    ) -> None:
        ctx = context if context is not None else ErrorContext(source="validation")
        if field:
            ctx.field_path = field
        # None means "not provided" here, so only concrete values are
        # copied into the context details.
        for key, value in (("expected", expected), ("actual", actual)):
            if value is not None:
                ctx.details[key] = value
        super().__init__(message, ctx)
        self.field = field
        self.expected = expected
        self.actual = actual
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
class RuntimeError(AiLibError):
    """General runtime error.

    Raised for unexpected runtime conditions that fit no other
    category. Note: this name shadows the builtin ``RuntimeError``
    within this module.
    """

    def __init__(self, message: str, context: ErrorContext | None = None) -> None:
        fallback = ErrorContext(source="runtime")
        super().__init__(message, context or fallback)
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
class RemoteError(AiLibError):
    """Error from remote API.

    Represents errors returned by AI provider APIs, with structured
    classification for retry and fallback decisions.

    Attributes:
        status_code: HTTP status code
        error_class: Standardized error classification
        retryable: Whether the error is retryable
        fallbackable: Whether fallback to another model is appropriate
        raw_error: Raw error response from the API
        retry_after: Suggested retry delay in seconds (from header)
        request_id: Provider request identifier, when available
    """

    def __init__(
        self,
        message: str,
        *,
        status_code: int,
        error_class: ErrorClass,
        retryable: bool = False,
        fallbackable: bool = False,
        raw_error: dict[str, Any] | None = None,
        retry_after: float | None = None,
        request_id: str | None = None,
    ) -> None:
        # Mirror the key classification facts into the structured context
        # so they show up in the formatted message and diagnostics.
        ctx = ErrorContext(source="remote")
        ctx.details["status_code"] = status_code
        ctx.details["error_class"] = error_class.value
        if request_id:
            ctx.details["request_id"] = request_id

        super().__init__(message, ctx)

        self.status_code = status_code
        self.error_class = error_class
        self.retryable = retryable
        self.fallbackable = fallbackable
        # Normalize an absent body to an empty dict so callers can index safely.
        self.raw_error = raw_error or {}
        self.retry_after = retry_after
        self.request_id = request_id

    @classmethod
    def from_response(
        cls,
        status_code: int,
        body: dict[str, Any] | None = None,
        headers: dict[str, str] | None = None,
        provider_classification: dict[str, Any] | None = None,
    ) -> RemoteError:
        """Create RemoteError from HTTP response.

        Args:
            status_code: HTTP status code
            body: Response body (parsed JSON)
            headers: Response headers
            provider_classification: Provider's error classification config

        Returns:
            RemoteError with appropriate classification
        """
        # Imported lazily — presumably to avoid a circular import between
        # errors.base and errors.classification; confirm before moving.
        from ai_lib_python.errors.classification import (
            classify_http_error,
            extract_error_message,
            is_fallbackable,
            is_retryable,
        )

        error_class = classify_http_error(status_code, body, provider_classification)
        # Fall back to a generic "HTTP <code>" message when the body
        # yields no usable error text.
        message = extract_error_message(body) or f"HTTP {status_code}"

        # Extract retry-after header
        # NOTE(review): lookup is case-sensitive and tries only these two
        # spellings; an HTTP-date-formatted Retry-After value fails float()
        # and is silently dropped by the suppress() — confirm that both of
        # these are intended.
        retry_after = None
        if headers:
            retry_after_str = headers.get("retry-after") or headers.get("Retry-After")
            if retry_after_str:
                with contextlib.suppress(ValueError):
                    retry_after = float(retry_after_str)

        # Extract request ID
        request_id = None
        if headers:
            request_id = (
                headers.get("x-request-id")
                or headers.get("request-id")
                or headers.get("X-Request-Id")
            )
        # A request_id present in the body takes precedence over any
        # header-derived value.
        if body and "request_id" in body:
            request_id = body["request_id"]

        return cls(
            message=message,
            status_code=status_code,
            error_class=error_class,
            retryable=is_retryable(error_class),
            fallbackable=is_fallbackable(error_class),
            raw_error=body,
            retry_after=retry_after,
            request_id=request_id,
        )
|