mirascope 2.0.0a4__py3-none-any.whl → 2.0.0a5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/api/_generated/__init__.py +17 -1
- mirascope/api/_generated/api_keys/__init__.py +7 -0
- mirascope/api/_generated/api_keys/client.py +453 -0
- mirascope/api/_generated/api_keys/raw_client.py +853 -0
- mirascope/api/_generated/api_keys/types/__init__.py +9 -0
- mirascope/api/_generated/api_keys/types/api_keys_create_response.py +36 -0
- mirascope/api/_generated/api_keys/types/api_keys_get_response.py +35 -0
- mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +35 -0
- mirascope/api/_generated/client.py +6 -0
- mirascope/api/_generated/environments/__init__.py +17 -0
- mirascope/api/_generated/environments/client.py +532 -0
- mirascope/api/_generated/environments/raw_client.py +1088 -0
- mirascope/api/_generated/environments/types/__init__.py +15 -0
- mirascope/api/_generated/environments/types/environments_create_response.py +26 -0
- mirascope/api/_generated/environments/types/environments_get_response.py +26 -0
- mirascope/api/_generated/environments/types/environments_list_response_item.py +26 -0
- mirascope/api/_generated/environments/types/environments_update_response.py +26 -0
- mirascope/api/_generated/organizations/client.py +36 -12
- mirascope/api/_generated/organizations/raw_client.py +32 -6
- mirascope/api/_generated/organizations/types/organizations_create_response.py +1 -0
- mirascope/api/_generated/organizations/types/organizations_get_response.py +1 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +1 -0
- mirascope/api/_generated/organizations/types/organizations_update_response.py +1 -0
- mirascope/api/_generated/projects/client.py +34 -10
- mirascope/api/_generated/projects/raw_client.py +46 -4
- mirascope/api/_generated/projects/types/projects_create_response.py +1 -0
- mirascope/api/_generated/projects/types/projects_get_response.py +1 -0
- mirascope/api/_generated/projects/types/projects_list_response_item.py +1 -0
- mirascope/api/_generated/projects/types/projects_update_response.py +1 -0
- mirascope/api/_generated/reference.md +729 -4
- mirascope/llm/__init__.py +2 -2
- mirascope/llm/exceptions.py +28 -0
- mirascope/llm/providers/__init__.py +6 -4
- mirascope/llm/providers/anthropic/_utils/__init__.py +2 -0
- mirascope/llm/providers/anthropic/_utils/errors.py +46 -0
- mirascope/llm/providers/anthropic/beta_provider.py +6 -0
- mirascope/llm/providers/anthropic/provider.py +5 -0
- mirascope/llm/providers/base/__init__.py +2 -1
- mirascope/llm/providers/base/base_provider.py +173 -58
- mirascope/llm/providers/google/_utils/__init__.py +2 -0
- mirascope/llm/providers/google/_utils/errors.py +49 -0
- mirascope/llm/providers/google/provider.py +5 -4
- mirascope/llm/providers/mlx/_utils.py +8 -1
- mirascope/llm/providers/mlx/provider.py +8 -0
- mirascope/llm/providers/openai/__init__.py +10 -1
- mirascope/llm/providers/openai/_utils/__init__.py +5 -0
- mirascope/llm/providers/openai/_utils/errors.py +46 -0
- mirascope/llm/providers/openai/completions/base_provider.py +6 -6
- mirascope/llm/providers/openai/provider.py +14 -1
- mirascope/llm/providers/openai/responses/provider.py +13 -7
- mirascope/llm/providers/provider_registry.py +56 -3
- mirascope/ops/_internal/closure.py +62 -11
- {mirascope-2.0.0a4.dist-info → mirascope-2.0.0a5.dist-info}/METADATA +1 -1
- {mirascope-2.0.0a4.dist-info → mirascope-2.0.0a5.dist-info}/RECORD +56 -38
- mirascope/llm/providers/load_provider.py +0 -54
- {mirascope-2.0.0a4.dist-info → mirascope-2.0.0a5.dist-info}/WHEEL +0 -0
- {mirascope-2.0.0a4.dist-info → mirascope-2.0.0a5.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/__init__.py
CHANGED
@@ -92,8 +92,8 @@ from .providers import (
     Params,
     Provider,
     ProviderId,
-    load_provider,
     register_provider,
+    reset_provider_registry,
 )
 from .responses import (
     AsyncChunkIterator,
@@ -232,7 +232,6 @@ __all__ = [
     "exceptions",
     "format",
     "formatting",
-    "load_provider",
     "mcp",
     "messages",
     "model",
@@ -242,6 +241,7 @@ __all__ = [
     "prompts",
     "providers",
     "register_provider",
+    "reset_provider_registry",
     "responses",
     "tool",
     "tools",
mirascope/llm/exceptions.py
CHANGED
@@ -11,6 +11,7 @@ class MirascopeLLMError(Exception):
     """Base exception for all Mirascope LLM errors."""

     original_exception: Exception | None
+    provider: "ProviderId | None"


 class APIError(MirascopeLLMError):
@@ -18,6 +19,10 @@ class APIError(MirascopeLLMError):

     status_code: int | None

+    def __init__(self, message: str, status_code: int | None = None) -> None:
+        super().__init__(message)
+        self.status_code = status_code
+

 class ConnectionError(MirascopeLLMError):
     """Raised when unable to connect to the API (network issues, timeouts)."""
@@ -26,18 +31,30 @@ class ConnectionError(MirascopeLLMError):
 class AuthenticationError(APIError):
     """Raised for authentication failures (401, invalid API keys)."""

+    def __init__(self, message: str, status_code: int | None = None) -> None:
+        super().__init__(message, status_code=status_code or 401)
+

 class PermissionError(APIError):
     """Raised for permission/authorization failures (403)."""

+    def __init__(self, message: str, status_code: int | None = None) -> None:
+        super().__init__(message, status_code=status_code or 403)
+

 class BadRequestError(APIError):
     """Raised for malformed requests (400, 422)."""

+    def __init__(self, message: str, status_code: int | None = None) -> None:
+        super().__init__(message, status_code=status_code or 400)
+

 class NotFoundError(APIError):
     """Raised when requested resource is not found (404)."""

+    def __init__(self, message: str, status_code: int | None = None) -> None:
+        super().__init__(message, status_code=status_code or 404)
+

 class ToolNotFoundError(MirascopeLLMError):
     """Raised if a tool_call cannot be converted to any corresponding tool."""
@@ -96,15 +113,26 @@ class FormattingModeNotSupportedError(FeatureNotSupportedError):
 class RateLimitError(APIError):
     """Raised when rate limits are exceeded (429)."""

+    def __init__(self, message: str, status_code: int | None = None) -> None:
+        super().__init__(message, status_code=status_code or 429)
+

 class ServerError(APIError):
     """Raised for server-side errors (500+)."""

+    def __init__(self, message: str, status_code: int | None = None) -> None:
+        super().__init__(message, status_code=status_code or 500)
+

 class TimeoutError(MirascopeLLMError):
     """Raised when requests timeout or deadline exceeded."""


+# This wraps the APIResponseValidationErrors that OpenAI and Anthropic both return.
+class ResponseValidationError(MirascopeLLMError):
+    """Raised when API response fails validation."""
+
+
 class NoRegisteredProviderError(MirascopeLLMError):
     """Raised when no provider is registered for a given model_id."""

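A quick illustration (not part of the diff) of what the new constructors provide: each APIError subclass now fills in a sensible default status code when none is given, and MirascopeLLMError additionally carries `provider` and `original_exception` metadata that providers attach when wrapping SDK errors (see the base provider changes below).

# Minimal sketch; assumes only the constructor behavior shown in the diff above.
from mirascope.llm.exceptions import NotFoundError, RateLimitError

err = RateLimitError("quota exceeded")
assert err.status_code == 429  # default supplied by the new __init__

err = NotFoundError("unknown model", status_code=404)
assert err.status_code == 404  # explicit values pass through unchanged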
mirascope/llm/providers/__init__.py
CHANGED
@@ -6,7 +6,6 @@ from .anthropic import (
 )
 from .base import BaseProvider, Params, Provider
 from .google import GoogleModelId, GoogleProvider
-from .load_provider import load, load_provider
 from .mlx import MLXModelId, MLXProvider
 from .model_id import ModelId
 from .ollama import OllamaProvider
@@ -16,7 +15,11 @@ from .openai import (
 )
 from .openai.completions import BaseOpenAICompletionsProvider
 from .provider_id import KNOWN_PROVIDER_IDS, ProviderId
-from .provider_registry import
+from .provider_registry import (
+    get_provider_for_model,
+    register_provider,
+    reset_provider_registry,
+)
 from .together import TogetherProvider

 __all__ = [
@@ -38,7 +41,6 @@ __all__ = [
     "ProviderId",
     "TogetherProvider",
     "get_provider_for_model",
-    "load",
-    "load_provider",
     "register_provider",
+    "reset_provider_registry",
 ]
mirascope/llm/providers/anthropic/_utils/errors.py
ADDED
@@ -0,0 +1,46 @@
+"""Anthropic error handling utilities."""
+
+from anthropic import (
+    AnthropicError,
+    APIConnectionError as AnthropicAPIConnectionError,
+    APIResponseValidationError as AnthropicAPIResponseValidationError,
+    APITimeoutError as AnthropicAPITimeoutError,
+    AuthenticationError as AnthropicAuthenticationError,
+    BadRequestError as AnthropicBadRequestError,
+    ConflictError as AnthropicConflictError,
+    InternalServerError as AnthropicInternalServerError,
+    NotFoundError as AnthropicNotFoundError,
+    PermissionDeniedError as AnthropicPermissionDeniedError,
+    RateLimitError as AnthropicRateLimitError,
+    UnprocessableEntityError as AnthropicUnprocessableEntityError,
+)
+
+from ....exceptions import (
+    APIError,
+    AuthenticationError,
+    BadRequestError,
+    ConnectionError,
+    NotFoundError,
+    PermissionError,
+    RateLimitError,
+    ResponseValidationError,
+    ServerError,
+    TimeoutError,
+)
+from ...base import ProviderErrorMap
+
+# Shared error mapping used by both AnthropicProvider and AnthropicBetaProvider
+ANTHROPIC_ERROR_MAP: ProviderErrorMap = {
+    AnthropicAuthenticationError: AuthenticationError,
+    AnthropicPermissionDeniedError: PermissionError,
+    AnthropicBadRequestError: BadRequestError,
+    AnthropicUnprocessableEntityError: BadRequestError,
+    AnthropicNotFoundError: NotFoundError,
+    AnthropicConflictError: BadRequestError,
+    AnthropicRateLimitError: RateLimitError,
+    AnthropicInternalServerError: ServerError,
+    AnthropicAPITimeoutError: TimeoutError,
+    AnthropicAPIConnectionError: ConnectionError,
+    AnthropicAPIResponseValidationError: ResponseValidationError,
+    AnthropicError: APIError,  # Catch-all for unknown Anthropic errors
+}
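Because ANTHROPIC_ERROR_MAP keys the Anthropic SDK's base AnthropicError as a catch-all, SDK exceptions with no dedicated entry still resolve to Mirascope's APIError once the base provider walks the exception's MRO (shown further below). A small hedged check that reuses only names imported in the new module:

# Sketch of the MRO-based lookup against the map above; not code from the package.
import anthropic
from mirascope.llm.exceptions import APIError, RateLimitError
from mirascope.llm.providers.anthropic._utils.errors import ANTHROPIC_ERROR_MAP

def lookup(exc_type: type[Exception]):
    # Walk the class hierarchy the same way BaseProvider._wrap_errors does.
    for cls in exc_type.__mro__:
        if cls in ANTHROPIC_ERROR_MAP:
            return ANTHROPIC_ERROR_MAP[cls]
    return None

assert lookup(anthropic.RateLimitError) is RateLimitError  # direct entry
assert lookup(anthropic.AnthropicError) is APIError        # catch-all fallback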
mirascope/llm/providers/anthropic/beta_provider.py
CHANGED
@@ -29,6 +29,7 @@ from ...tools import (
     Toolkit,
 )
 from ..base import BaseProvider, Params
+from . import _utils
 from ._utils import beta_decode, beta_encode
 from .model_id import model_name

@@ -38,6 +39,7 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):

     id = "anthropic-beta"
     default_scope = "anthropic-beta/"
+    error_map = _utils.ANTHROPIC_ERROR_MAP

     def __init__(
         self, *, api_key: str | None = None, base_url: str | None = None
@@ -46,6 +48,10 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         self.client = Anthropic(api_key=api_key, base_url=base_url)
         self.async_client = AsyncAnthropic(api_key=api_key, base_url=base_url)

+    def get_error_status(self, e: Exception) -> int | None:
+        """Extract HTTP status code from Anthropic exception."""
+        return getattr(e, "status_code", None)
+
     def _call(
         self,
         *,
mirascope/llm/providers/anthropic/provider.py
CHANGED
@@ -55,6 +55,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):

     id = "anthropic"
     default_scope = "anthropic/"
+    error_map = _utils.ANTHROPIC_ERROR_MAP
     _beta_provider: AnthropicBetaProvider

     def __init__(
@@ -65,6 +66,10 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         self.async_client = AsyncAnthropic(api_key=api_key, base_url=base_url)
         self._beta_provider = AnthropicBetaProvider(api_key=api_key, base_url=base_url)

+    def get_error_status(self, e: Exception) -> int | None:
+        """Extract HTTP status code from Anthropic exception."""
+        return getattr(e, "status_code", None)
+
     def _call(
         self,
         *,
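As a rough sketch of what these provider changes mean for calling code: an Anthropic authentication failure now surfaces as Mirascope's AuthenticationError with the status code, provider id, and original SDK exception attached. The import locations, the user(...) message helper's signature, and the placeholder model id are assumptions inferred from this diff, not verbatim package API.

from mirascope.llm.exceptions import AuthenticationError
from mirascope.llm.providers import AnthropicProvider  # export path assumed
from mirascope.llm.messages import user  # call signature assumed

provider = AnthropicProvider(api_key="not-a-real-key")
try:
    provider.call(model_id="anthropic/<model-name>", messages=[user("Hello!")])
except AuthenticationError as e:
    # AnthropicAuthenticationError -> AuthenticationError via ANTHROPIC_ERROR_MAP,
    # with metadata attached by BaseProvider._wrap_errors.
    print(e.status_code)               # 401
    print(e.provider)                  # "anthropic"
    print(type(e.original_exception))  # the wrapped anthropic SDK exception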
mirascope/llm/providers/base/__init__.py
CHANGED
@@ -1,7 +1,7 @@
 """Base client interfaces and types."""

 from . import _utils
-from .base_provider import BaseProvider, Provider
+from .base_provider import BaseProvider, Provider, ProviderErrorMap
 from .kwargs import BaseKwargs, KwargsT
 from .params import Params

@@ -11,5 +11,6 @@ __all__ = [
     "KwargsT",
     "Params",
     "Provider",
+    "ProviderErrorMap",
     "_utils",
 ]
mirascope/llm/providers/base/base_provider.py
CHANGED
@@ -3,18 +3,22 @@
 from __future__ import annotations

 from abc import ABC, abstractmethod
-from collections.abc import Sequence
-from
+from collections.abc import Callable, Generator, Mapping, Sequence
+from contextlib import contextmanager
+from typing import TYPE_CHECKING, Any, ClassVar, Generic, TypeAlias, cast, overload
 from typing_extensions import TypeVar, Unpack

 from ...context import Context, DepsT
+from ...exceptions import APIError, MirascopeLLMError
 from ...formatting import Format, FormattableT
 from ...messages import Message, UserContent, user
 from ...responses import (
+    AsyncChunkIterator,
     AsyncContextResponse,
     AsyncContextStreamResponse,
     AsyncResponse,
     AsyncStreamResponse,
+    ChunkIterator,
     ContextResponse,
     ContextStreamResponse,
     Response,
@@ -33,6 +37,7 @@ from ...tools import (
 from .params import Params

 if TYPE_CHECKING:
+    from ...exceptions import MirascopeLLMError
     from ..provider_id import ProviderId

 ProviderClientT = TypeVar("ProviderClientT")
@@ -40,6 +45,18 @@ ProviderClientT = TypeVar("ProviderClientT")
 Provider: TypeAlias = "BaseProvider[Any]"
 """Type alias for `BaseProvider` with any client type."""

+ProviderErrorMap: TypeAlias = Mapping[
+    type[Exception],
+    "type[MirascopeLLMError] | Callable[[Exception], type[MirascopeLLMError]]",
+]
+"""Mapping from provider SDK exceptions to Mirascope error types.
+
+Keys are provider SDK exception types (e.g., OpenAIError, AnthropicError).
+Values can be:
+- Error type: Simple 1:1 mapping (e.g., RateLimitError)
+- Callable: Transform function returning error type based on exception details
+"""
+

 class BaseProvider(Generic[ProviderClientT], ABC):
     """Base abstract provider for LLM interactions.
@@ -59,8 +76,67 @@ class BaseProvider(Generic[ProviderClientT], ABC):
     - ["anthropic/", "openai/"] - Multiple scopes (e.g., for AWS Bedrock)
     """

+    error_map: ClassVar[ProviderErrorMap]
+    """Mapping from provider SDK exceptions to Mirascope error types.
+
+    Values can be:
+    - Error type: Simple 1:1 mapping (e.g., AnthropicRateLimitError -> RateLimitError)
+    - Callable: Transform function returning error type based on exception details
+      (e.g., lambda e: NotFoundError if e.code == "model_not_found" else BadRequestError)
+
+    The mapping is walked via the exception's MRO, allowing both specific error handling
+    and fallback to base SDK error types (e.g., AnthropicError -> APIError).
+    """
+
     client: ProviderClientT

+    @contextmanager
+    def _wrap_errors(self) -> Generator[None, None, None]:
+        """Wrap provider API calls and convert errors to Mirascope exceptions.
+
+        Walks the exception's MRO to find the first matching error type in the
+        provider's error_map, allowing both specific error handling and fallback
+        to base SDK error types (e.g., AnthropicError -> APIError).
+        """
+        try:
+            yield
+        except Exception as e:
+            # Walk MRO to find first matching error type in provider's error_map
+            for error_class in type(e).__mro__:
+                if error_class in self.error_map:
+                    error_type_or_fn = self.error_map[error_class]
+
+                    if isinstance(error_type_or_fn, type):
+                        error_type = cast(type[MirascopeLLMError], error_type_or_fn)
+                    else:
+                        error_type = error_type_or_fn(e)
+
+                    # Construct Mirascope error with metadata
+                    error: MirascopeLLMError = error_type(str(e))
+                    if isinstance(error, APIError):
+                        error.status_code = self.get_error_status(e)
+                    error.provider = self.id
+                    error.original_exception = e
+                    raise error from e
+
+            # Not in error_map - not a provider error, re-raise as-is
+            raise
+
+    def _wrap_iterator_errors(self, iterator: ChunkIterator) -> ChunkIterator:
+        """Wrap sync chunk iterator to handle errors during iteration."""
+        # TODO: Consider moving this logic into BaseSyncStreamResponse if appropriate.
+        with self._wrap_errors():
+            yield from iterator
+
+    async def _wrap_async_iterator_errors(
+        self, iterator: AsyncChunkIterator
+    ) -> AsyncChunkIterator:
+        """Wrap async chunk iterator to handle errors during iteration."""
+        # TODO: Consider moving this logic into BaseAsyncStreamResponse if appropriate.
+        with self._wrap_errors():
+            async for chunk in iterator:
+                yield chunk
+
     @overload
     def call(
         self,
@@ -121,13 +197,14 @@ class BaseProvider(Generic[ProviderClientT], ABC):
         Returns:
             An `llm.Response` object containing the LLM-generated content.
         """
-
-
-
-
-
-
-
+        with self._wrap_errors():
+            return self._call(
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )

     @abstractmethod
     def _call(
@@ -215,14 +292,15 @@ class BaseProvider(Generic[ProviderClientT], ABC):
         Returns:
             An `llm.ContextResponse` object containing the LLM-generated content.
         """
-
-
-
-
-
-
-
-
+        with self._wrap_errors():
+            return self._context_call(
+                ctx=ctx,
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )

     @abstractmethod
     def _context_call(
@@ -300,13 +378,14 @@ class BaseProvider(Generic[ProviderClientT], ABC):
         Returns:
             An `llm.AsyncResponse` object containing the LLM-generated content.
         """
-
-
-
-
-
-
-
+        with self._wrap_errors():
+            return await self._call_async(
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )

     @abstractmethod
     async def _call_async(
@@ -394,14 +473,15 @@ class BaseProvider(Generic[ProviderClientT], ABC):
         Returns:
             An `llm.AsyncContextResponse` object containing the LLM-generated content.
         """
-
-
-
-
-
-
-
-
+        with self._wrap_errors():
+            return await self._context_call_async(
+                ctx=ctx,
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )

     @abstractmethod
     async def _context_call_async(
@@ -479,13 +559,18 @@ class BaseProvider(Generic[ProviderClientT], ABC):
         Returns:
             An `llm.StreamResponse` object for iterating over the LLM-generated content.
         """
-
-
-
-
-
-
+        with self._wrap_errors():
+            stream_response = self._stream(
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )
+        stream_response._chunk_iterator = self._wrap_iterator_errors(  # pyright: ignore[reportPrivateUsage]
+            stream_response._chunk_iterator  # pyright: ignore[reportPrivateUsage]
         )
+        return stream_response

     @abstractmethod
     def _stream(
@@ -577,14 +662,19 @@ class BaseProvider(Generic[ProviderClientT], ABC):
         Returns:
             An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
         """
-
-
-
-
-
-
-
+        with self._wrap_errors():
+            stream_response = self._context_stream(
+                ctx=ctx,
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )
+        stream_response._chunk_iterator = self._wrap_iterator_errors(  # pyright: ignore[reportPrivateUsage]
+            stream_response._chunk_iterator  # pyright: ignore[reportPrivateUsage]
         )
+        return stream_response

     @abstractmethod
     def _context_stream(
@@ -664,13 +754,18 @@ class BaseProvider(Generic[ProviderClientT], ABC):
         Returns:
             An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
         """
-
-
-
-
-
-
+        with self._wrap_errors():
+            stream_response = await self._stream_async(
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )
+        stream_response._chunk_iterator = self._wrap_async_iterator_errors(  # pyright: ignore[reportPrivateUsage]
+            stream_response._chunk_iterator  # pyright: ignore[reportPrivateUsage]
         )
+        return stream_response

     @abstractmethod
     async def _stream_async(
@@ -764,14 +859,19 @@ class BaseProvider(Generic[ProviderClientT], ABC):
         Returns:
             An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
         """
-
-
-
-
-
-
-
+        with self._wrap_errors():
+            stream_response = await self._context_stream_async(
+                ctx=ctx,
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )
+        stream_response._chunk_iterator = self._wrap_async_iterator_errors(  # pyright: ignore[reportPrivateUsage]
+            stream_response._chunk_iterator  # pyright: ignore[reportPrivateUsage]
         )
+        return stream_response

     @abstractmethod
     async def _context_stream_async(
@@ -1383,3 +1483,18 @@ class BaseProvider(Generic[ProviderClientT], ABC):
             format=response.format,
             **params,
         )
+
+    @abstractmethod
+    def get_error_status(self, e: Exception) -> int | None:
+        """Extract HTTP status code from provider-specific exception.
+
+        Different SDKs store status codes differently (e.g., .status_code vs .code).
+        Each provider implements this to handle their SDK's convention.
+
+        Args:
+            e: The exception to extract status code from.
+
+        Returns:
+            The HTTP status code if available, None otherwise.
+        """
+        ...
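The ProviderErrorMap values can be either an error type or a callable, and _wrap_errors resolves them by walking the raised exception's MRO. Below is a self-contained sketch of just that lookup, using invented stand-in SDK exceptions (the Fake* names are not real SDK classes):

# Standalone illustration of the error_map resolution performed by _wrap_errors above.
from mirascope.llm.exceptions import APIError, RateLimitError, ServerError

class FakeSDKError(Exception): ...
class FakeSDKRateLimitError(FakeSDKError): ...
class FakeSDKOverloadedError(FakeSDKRateLimitError): ...  # no direct entry in the map

error_map = {
    FakeSDKRateLimitError: RateLimitError,  # direct type mapping
    FakeSDKError: lambda e: ServerError if "overloaded" in str(e) else APIError,  # callable
}

def resolve(e: Exception):
    # Walk the exception's MRO and resolve type-or-callable values, as _wrap_errors does.
    for cls in type(e).__mro__:
        if cls in error_map:
            value = error_map[cls]
            return value if isinstance(value, type) else value(e)
    raise e  # not a provider error: re-raise unchanged

assert resolve(FakeSDKOverloadedError("busy")) is RateLimitError  # matched via parent class
assert resolve(FakeSDKError("overloaded")) is ServerError         # callable picks the type
assert resolve(FakeSDKError("bad gateway")) is APIError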
mirascope/llm/providers/google/_utils/errors.py
ADDED
@@ -0,0 +1,49 @@
+"""Google error handling utilities."""
+
+from google.genai.errors import (
+    ClientError as GoogleClientError,
+    ServerError as GoogleServerError,
+)
+
+from ....exceptions import (
+    APIError,
+    AuthenticationError,
+    BadRequestError,
+    NotFoundError,
+    PermissionError,
+    RateLimitError,
+    ServerError,
+)
+from ...base import ProviderErrorMap
+
+
+def map_google_error(e: Exception) -> type[APIError]:
+    """Map Google error to appropriate Mirascope error type.
+
+    Google only provides ClientError (4xx) and ServerError (5xx) with status codes,
+    so we map based on status code and message patterns.
+    """
+    if not isinstance(e, GoogleClientError | GoogleServerError):
+        return APIError
+
+    # Authentication errors (401) or 400 with "API key not valid"
+    if e.code == 401 or (e.code == 400 and "API key not valid" in str(e)):
+        return AuthenticationError
+    if e.code == 403:
+        return PermissionError
+    if e.code == 404:
+        return NotFoundError
+    if e.code == 429:
+        return RateLimitError
+    if e.code in (400, 422):
+        return BadRequestError
+    if isinstance(e, GoogleServerError) and e.code >= 500:
+        return ServerError
+    return APIError
+
+
+# Shared error mapping for Google provider
+GOOGLE_ERROR_MAP: ProviderErrorMap = {
+    GoogleClientError: map_google_error,
+    GoogleServerError: map_google_error,
+}