maestro_core-0.2.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- maestro_core-0.2.0/.gitignore +7 -0
- maestro_core-0.2.0/PKG-INFO +84 -0
- maestro_core-0.2.0/README.md +54 -0
- maestro_core-0.2.0/maestro_core/__init__.py +12 -0
- maestro_core-0.2.0/maestro_core/api/__init__.py +136 -0
- maestro_core-0.2.0/maestro_core/api/client.py +132 -0
- maestro_core-0.2.0/maestro_core/api/error.py +155 -0
- maestro_core-0.2.0/maestro_core/api/prompt_cache.py +511 -0
- maestro_core-0.2.0/maestro_core/api/providers/__init__.py +242 -0
- maestro_core-0.2.0/maestro_core/api/providers/anthropic.py +420 -0
- maestro_core-0.2.0/maestro_core/api/providers/gemini.py +655 -0
- maestro_core-0.2.0/maestro_core/api/providers/ollama.py +208 -0
- maestro_core-0.2.0/maestro_core/api/providers/openai_compat.py +692 -0
- maestro_core-0.2.0/maestro_core/api/serialization.py +180 -0
- maestro_core-0.2.0/maestro_core/api/sse.py +175 -0
- maestro_core-0.2.0/maestro_core/api/types.py +364 -0
- maestro_core-0.2.0/maestro_core/config.py +171 -0
- maestro_core-0.2.0/maestro_core/engine.py +272 -0
- maestro_core-0.2.0/maestro_core/provider_router.py +292 -0
- maestro_core-0.2.0/maestro_core/tools/__init__.py +5 -0
- maestro_core-0.2.0/maestro_core/tools/base.py +87 -0
- maestro_core-0.2.0/maestro_core/tools/bash.py +82 -0
- maestro_core-0.2.0/maestro_core/tools/file_edit.py +98 -0
- maestro_core-0.2.0/maestro_core/tools/file_read.py +84 -0
- maestro_core-0.2.0/maestro_core/tools/file_write.py +52 -0
- maestro_core-0.2.0/maestro_core/tools/glob_tool.py +79 -0
- maestro_core-0.2.0/maestro_core/tools/grep.py +100 -0
- maestro_core-0.2.0/maestro_core/trust/__init__.py +16 -0
- maestro_core-0.2.0/maestro_core/trust/anonymizer.py +175 -0
- maestro_core-0.2.0/maestro_core/trust/detector.py +120 -0
- maestro_core-0.2.0/maestro_core/trust/patterns.py +157 -0
- maestro_core-0.2.0/maestro_core/trust/vault.py +106 -0
- maestro_core-0.2.0/pyproject.toml +48 -0
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: maestro-core
|
|
3
|
+
Version: 0.2.0
|
|
4
|
+
Summary: Core engine for Maestro AI agent — providers, trust layer, tools, conversation engine
|
|
5
|
+
Project-URL: Homepage, https://github.com/oke-pro/maestro
|
|
6
|
+
Project-URL: Repository, https://github.com/oke-pro/maestro
|
|
7
|
+
Author-email: Yann Chappuit <yann@oke.pro>
|
|
8
|
+
License: MIT
|
|
9
|
+
Keywords: ai,anonymization,llm,multi-provider,privacy,tool-calling,trust-layer
|
|
10
|
+
Classifier: Development Status :: 3 - Alpha
|
|
11
|
+
Classifier: Intended Audience :: Developers
|
|
12
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
13
|
+
Classifier: Operating System :: OS Independent
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
18
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
19
|
+
Classifier: Typing :: Typed
|
|
20
|
+
Requires-Python: >=3.11
|
|
21
|
+
Requires-Dist: anthropic>=0.44.0
|
|
22
|
+
Requires-Dist: httpx>=0.27.0
|
|
23
|
+
Requires-Dist: openai>=1.60.0
|
|
24
|
+
Requires-Dist: pydantic>=2.10.0
|
|
25
|
+
Requires-Dist: pyyaml>=6.0
|
|
26
|
+
Provides-Extra: dev
|
|
27
|
+
Requires-Dist: pytest-asyncio>=0.24; extra == 'dev'
|
|
28
|
+
Requires-Dist: pytest>=8.0; extra == 'dev'
|
|
29
|
+
Description-Content-Type: text/markdown
|
|
30
|
+
|
|
31
|
+
# maestro-core
|
|
32
|
+
|
|
33
|
+
Core engine for Maestro AI agent. Multi-provider, privacy-first.
|
|
34
|
+
|
|
35
|
+
## Features
|
|
36
|
+
|
|
37
|
+
- **4 LLM Providers**: Anthropic (Claude), OpenAI (GPT), Google (Gemini), Ollama (local)
|
|
38
|
+
- **Trust Layer**: Automatic anonymization of sensitive data (NIR, SIRET, IBAN, emails, names, API keys)
|
|
39
|
+
- **Provider Router**: Auto-route to local/cloud based on data sensitivity
|
|
40
|
+
- **Conversation Engine**: Turn loop with tool calling
|
|
41
|
+
- **Tool Framework**: Base classes + 6 built-in tools (bash, file_read, file_write, file_edit, grep, glob)
|
|
42
|
+
|
|
43
|
+
## Installation
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
pip install maestro-core
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
## Usage
|
|
50
|
+
|
|
51
|
+
```python
|
|
52
|
+
from maestro_core.engine import ConversationEngine
|
|
53
|
+
from maestro_core.provider_router import ProviderRouter, RoutingMode
|
|
54
|
+
from maestro_core.tools import ToolRegistry
|
|
55
|
+
from maestro_core.tools.bash import BashTool
|
|
56
|
+
from maestro_core.config import MaestroConfig
|
|
57
|
+
from maestro_core.trust.anonymizer import TrustMode
|
|
58
|
+
|
|
59
|
+
# Setup
|
|
60
|
+
config = MaestroConfig()
|
|
61
|
+
router = ProviderRouter(config, RoutingMode.AUTO, TrustMode.STANDARD)
|
|
62
|
+
|
|
63
|
+
tools = ToolRegistry()
|
|
64
|
+
tools.register(BashTool())
|
|
65
|
+
|
|
66
|
+
engine = ConversationEngine(router=router, tools=tools)
|
|
67
|
+
|
|
68
|
+
# Chat
|
|
69
|
+
result = await engine.run_turn("List Python files", model="claude-sonnet-4")
|
|
70
|
+
print(result.response_text)
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
## Trust Layer
|
|
74
|
+
|
|
75
|
+
```python
|
|
76
|
+
from maestro_core.trust.anonymizer import Anonymizer, TrustMode
|
|
77
|
+
|
|
78
|
+
anon = Anonymizer(TrustMode.STANDARD)
|
|
79
|
+
safe = anon.anonymize_text("NIR 267041305561777 SIRET 41021468800013")
|
|
80
|
+
# → "NIR [NIR_1] SIRET [SIRET_1]"
|
|
81
|
+
|
|
82
|
+
restored = anon.deanonymize_text(safe)
|
|
83
|
+
# → "NIR 267041305561777 SIRET 41021468800013"
|
|
84
|
+
```
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
# maestro-core
|
|
2
|
+
|
|
3
|
+
Core engine for Maestro AI agent. Multi-provider, privacy-first.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- **4 LLM Providers**: Anthropic (Claude), OpenAI (GPT), Google (Gemini), Ollama (local)
|
|
8
|
+
- **Trust Layer**: Automatic anonymization of sensitive data (NIR, SIRET, IBAN, emails, names, API keys)
|
|
9
|
+
- **Provider Router**: Auto-route to local/cloud based on data sensitivity
|
|
10
|
+
- **Conversation Engine**: Turn loop with tool calling
|
|
11
|
+
- **Tool Framework**: Base classes + 6 built-in tools (bash, file_read, file_write, file_edit, grep, glob)
|
|
12
|
+
|
|
13
|
+
## Installation
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
pip install maestro-core
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
## Usage
|
|
20
|
+
|
|
21
|
+
```python
|
|
22
|
+
from maestro_core.engine import ConversationEngine
|
|
23
|
+
from maestro_core.provider_router import ProviderRouter, RoutingMode
|
|
24
|
+
from maestro_core.tools import ToolRegistry
|
|
25
|
+
from maestro_core.tools.bash import BashTool
|
|
26
|
+
from maestro_core.config import MaestroConfig
|
|
27
|
+
from maestro_core.trust.anonymizer import TrustMode
|
|
28
|
+
|
|
29
|
+
# Setup
|
|
30
|
+
config = MaestroConfig()
|
|
31
|
+
router = ProviderRouter(config, RoutingMode.AUTO, TrustMode.STANDARD)
|
|
32
|
+
|
|
33
|
+
tools = ToolRegistry()
|
|
34
|
+
tools.register(BashTool())
|
|
35
|
+
|
|
36
|
+
engine = ConversationEngine(router=router, tools=tools)
|
|
37
|
+
|
|
38
|
+
# Chat
|
|
39
|
+
result = await engine.run_turn("List Python files", model="claude-sonnet-4")
|
|
40
|
+
print(result.response_text)
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
## Trust Layer
|
|
44
|
+
|
|
45
|
+
```python
|
|
46
|
+
from maestro_core.trust.anonymizer import Anonymizer, TrustMode
|
|
47
|
+
|
|
48
|
+
anon = Anonymizer(TrustMode.STANDARD)
|
|
49
|
+
safe = anon.anonymize_text("NIR 267041305561777 SIRET 41021468800013")
|
|
50
|
+
# → "NIR [NIR_1] SIRET [SIRET_1]"
|
|
51
|
+
|
|
52
|
+
restored = anon.deanonymize_text(safe)
|
|
53
|
+
# → "NIR 267041305561777 SIRET 41021468800013"
|
|
54
|
+
```
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
"""Maestro Core — moteur multi-provider avec trust layer."""
|
|
2
|
+
|
|
3
|
+
__version__ = "0.1.0"
|
|
4
|
+
|
|
5
|
+
from .config import MaestroConfig
|
|
6
|
+
from .provider_router import ProviderRouter, RoutingMode
|
|
7
|
+
|
|
8
|
+
__all__ = [
|
|
9
|
+
"MaestroConfig",
|
|
10
|
+
"ProviderRouter",
|
|
11
|
+
"RoutingMode",
|
|
12
|
+
]
|
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
"""Package API — traduit de api/lib.rs.
|
|
2
|
+
|
|
3
|
+
Fournit les clients multi-provider (Anthropic, xAI, OpenAI),
|
|
4
|
+
le parsing SSE, le prompt cache et tous les types de l'API Messages.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from .client import (
|
|
8
|
+
MessageStream,
|
|
9
|
+
ProviderClient,
|
|
10
|
+
read_xai_base_url,
|
|
11
|
+
)
|
|
12
|
+
from .error import (
|
|
13
|
+
ApiError,
|
|
14
|
+
ApiResponseError,
|
|
15
|
+
AuthError,
|
|
16
|
+
BackoffOverflowError,
|
|
17
|
+
ExpiredOAuthTokenError,
|
|
18
|
+
HttpError,
|
|
19
|
+
InvalidApiKeyEnvError,
|
|
20
|
+
InvalidSseFrameError,
|
|
21
|
+
IoError,
|
|
22
|
+
JsonError,
|
|
23
|
+
MissingCredentialsError,
|
|
24
|
+
RetriesExhaustedError,
|
|
25
|
+
)
|
|
26
|
+
from .prompt_cache import (
|
|
27
|
+
CacheBreakEvent,
|
|
28
|
+
PromptCache,
|
|
29
|
+
PromptCacheConfig,
|
|
30
|
+
PromptCachePaths,
|
|
31
|
+
PromptCacheRecord,
|
|
32
|
+
PromptCacheStats,
|
|
33
|
+
)
|
|
34
|
+
from .providers import (
|
|
35
|
+
ProviderKind,
|
|
36
|
+
detect_provider_kind,
|
|
37
|
+
max_tokens_for_model,
|
|
38
|
+
resolve_model_alias,
|
|
39
|
+
)
|
|
40
|
+
from .providers.anthropic import (
|
|
41
|
+
AnthropicClient,
|
|
42
|
+
AuthSource,
|
|
43
|
+
OAuthTokenSet,
|
|
44
|
+
oauth_token_is_expired,
|
|
45
|
+
read_base_url,
|
|
46
|
+
resolve_saved_oauth_token,
|
|
47
|
+
resolve_startup_auth_source,
|
|
48
|
+
)
|
|
49
|
+
from .providers.openai_compat import (
|
|
50
|
+
OpenAiCompatClient,
|
|
51
|
+
OpenAiCompatConfig,
|
|
52
|
+
)
|
|
53
|
+
from .sse import SseParser, parse_frame
|
|
54
|
+
from .types import (
|
|
55
|
+
ContentBlockDelta,
|
|
56
|
+
ContentBlockDeltaEvent,
|
|
57
|
+
ContentBlockStartEvent,
|
|
58
|
+
ContentBlockStopEvent,
|
|
59
|
+
InputContentBlock,
|
|
60
|
+
InputMessage,
|
|
61
|
+
MessageDelta,
|
|
62
|
+
MessageDeltaEvent,
|
|
63
|
+
MessageRequest,
|
|
64
|
+
MessageResponse,
|
|
65
|
+
MessageStartEvent,
|
|
66
|
+
MessageStopEvent,
|
|
67
|
+
OutputContentBlock,
|
|
68
|
+
StreamEvent,
|
|
69
|
+
ToolChoice,
|
|
70
|
+
ToolDefinition,
|
|
71
|
+
ToolResultContentBlock,
|
|
72
|
+
Usage,
|
|
73
|
+
)
|
|
74
|
+
|
|
75
|
+
__all__ = [
|
|
76
|
+
# Client
|
|
77
|
+
"ProviderClient",
|
|
78
|
+
"MessageStream",
|
|
79
|
+
"read_xai_base_url",
|
|
80
|
+
# Error
|
|
81
|
+
"ApiError",
|
|
82
|
+
"ApiResponseError",
|
|
83
|
+
"AuthError",
|
|
84
|
+
"BackoffOverflowError",
|
|
85
|
+
"ExpiredOAuthTokenError",
|
|
86
|
+
"HttpError",
|
|
87
|
+
"InvalidApiKeyEnvError",
|
|
88
|
+
"InvalidSseFrameError",
|
|
89
|
+
"IoError",
|
|
90
|
+
"JsonError",
|
|
91
|
+
"MissingCredentialsError",
|
|
92
|
+
"RetriesExhaustedError",
|
|
93
|
+
# Prompt cache
|
|
94
|
+
"CacheBreakEvent",
|
|
95
|
+
"PromptCache",
|
|
96
|
+
"PromptCacheConfig",
|
|
97
|
+
"PromptCachePaths",
|
|
98
|
+
"PromptCacheRecord",
|
|
99
|
+
"PromptCacheStats",
|
|
100
|
+
# Providers
|
|
101
|
+
"AnthropicClient",
|
|
102
|
+
"AuthSource",
|
|
103
|
+
"OAuthTokenSet",
|
|
104
|
+
"OpenAiCompatClient",
|
|
105
|
+
"OpenAiCompatConfig",
|
|
106
|
+
"ProviderKind",
|
|
107
|
+
"detect_provider_kind",
|
|
108
|
+
"max_tokens_for_model",
|
|
109
|
+
"oauth_token_is_expired",
|
|
110
|
+
"read_base_url",
|
|
111
|
+
"resolve_model_alias",
|
|
112
|
+
"resolve_saved_oauth_token",
|
|
113
|
+
"resolve_startup_auth_source",
|
|
114
|
+
# SSE
|
|
115
|
+
"SseParser",
|
|
116
|
+
"parse_frame",
|
|
117
|
+
# Types
|
|
118
|
+
"ContentBlockDelta",
|
|
119
|
+
"ContentBlockDeltaEvent",
|
|
120
|
+
"ContentBlockStartEvent",
|
|
121
|
+
"ContentBlockStopEvent",
|
|
122
|
+
"InputContentBlock",
|
|
123
|
+
"InputMessage",
|
|
124
|
+
"MessageDelta",
|
|
125
|
+
"MessageDeltaEvent",
|
|
126
|
+
"MessageRequest",
|
|
127
|
+
"MessageResponse",
|
|
128
|
+
"MessageStartEvent",
|
|
129
|
+
"MessageStopEvent",
|
|
130
|
+
"OutputContentBlock",
|
|
131
|
+
"StreamEvent",
|
|
132
|
+
"ToolChoice",
|
|
133
|
+
"ToolDefinition",
|
|
134
|
+
"ToolResultContentBlock",
|
|
135
|
+
"Usage",
|
|
136
|
+
]
|
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
"""Client provider unifié — traduit de client.rs.
|
|
2
|
+
|
|
3
|
+
Abstraction qui route vers Anthropic, xAI ou OpenAI
|
|
4
|
+
selon le modèle demandé.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from typing import Optional, Union
|
|
10
|
+
|
|
11
|
+
from .error import ApiError
|
|
12
|
+
from .prompt_cache import PromptCache, PromptCacheRecord, PromptCacheStats
|
|
13
|
+
from .providers import ProviderKind, detect_provider_kind, resolve_model_alias
|
|
14
|
+
from .providers.anthropic import (
|
|
15
|
+
AnthropicClient,
|
|
16
|
+
AnthropicMessageStream,
|
|
17
|
+
AuthSource,
|
|
18
|
+
OAuthTokenSet,
|
|
19
|
+
oauth_token_is_expired,
|
|
20
|
+
read_base_url,
|
|
21
|
+
resolve_saved_oauth_token,
|
|
22
|
+
resolve_startup_auth_source,
|
|
23
|
+
)
|
|
24
|
+
from .providers.openai_compat import (
|
|
25
|
+
OpenAiCompatClient,
|
|
26
|
+
OpenAiCompatConfig,
|
|
27
|
+
OpenAiMessageStream,
|
|
28
|
+
)
|
|
29
|
+
from .types import MessageRequest, MessageResponse, StreamEvent
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
# Union of the two provider-specific stream types; stream_message() returns
# whichever one matches the active backend.
MessageStream = Union[AnthropicMessageStream, OpenAiMessageStream]
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class ProviderClient:
    """Unified multi-provider client.

    Dispatches every call to the backend (Anthropic, xAI or OpenAI)
    that matches the requested model.
    """

    def __init__(
        self,
        kind: ProviderKind,
        anthropic: Optional[AnthropicClient] = None,
        openai_compat: Optional[OpenAiCompatClient] = None,
    ) -> None:
        # Only one backend is set for a given kind; the other stays None.
        self._kind = kind
        self._anthropic = anthropic
        self._openai_compat = openai_compat

    @classmethod
    def from_model(cls, model: str) -> ProviderClient:
        """Create a client from a model name."""
        return cls.from_model_with_anthropic_auth(model, None)

    @classmethod
    def from_model_with_anthropic_auth(
        cls, model: str, anthropic_auth: Optional[AuthSource] = None
    ) -> ProviderClient:
        """Create a client with a specific auth source for Anthropic."""
        kind = detect_provider_kind(resolve_model_alias(model))

        if kind == ProviderKind.ANTHROPIC:
            backend = (
                AnthropicClient.from_env()
                if anthropic_auth is None
                else AnthropicClient.from_auth(anthropic_auth)
            )
            return cls(kind=kind, anthropic=backend)
        if kind == ProviderKind.XAI:
            return cls(
                kind=kind,
                openai_compat=OpenAiCompatClient.from_env(OpenAiCompatConfig.xai()),
            )
        if kind == ProviderKind.OPENAI:
            return cls(
                kind=kind,
                openai_compat=OpenAiCompatClient.from_env(OpenAiCompatConfig.openai()),
            )
        raise ApiError(f"unsupported provider kind: {kind}")

    @property
    def provider_kind(self) -> ProviderKind:
        """Kind of provider backing this client."""
        return self._kind

    def with_prompt_cache(self, prompt_cache: PromptCache) -> ProviderClient:
        """Attach a prompt cache (Anthropic only); no-op otherwise. Returns self."""
        if self._anthropic is not None:
            self._anthropic = self._anthropic.with_prompt_cache(prompt_cache)
        return self

    def prompt_cache_stats(self) -> Optional[PromptCacheStats]:
        """Prompt-cache statistics, or None without an Anthropic backend."""
        backend = self._anthropic
        return None if backend is None else backend.prompt_cache_stats()

    def take_last_prompt_cache_record(self) -> Optional[PromptCacheRecord]:
        """Take the most recent prompt-cache record, or None without Anthropic."""
        backend = self._anthropic
        return None if backend is None else backend.take_last_prompt_cache_record()

    async def send_message(self, request: MessageRequest) -> MessageResponse:
        """Send a message and wait for the complete response."""
        backend = self._anthropic if self._anthropic is not None else self._openai_compat
        if backend is None:
            raise ApiError("no provider client configured")
        return await backend.send_message(request)

    async def stream_message(self, request: MessageRequest) -> MessageStream:
        """Send a message in streaming mode."""
        backend = self._anthropic if self._anthropic is not None else self._openai_compat
        if backend is None:
            raise ApiError("no provider client configured")
        return await backend.stream_message(request)

    async def close(self) -> None:
        """Close the underlying HTTP client(s)."""
        if self._anthropic is not None:
            await self._anthropic.close()
        if self._openai_compat is not None:
            await self._openai_compat.close()
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def read_xai_base_url() -> str:
    """Read the xAI base URL from the environment."""
    # Local import on purpose: the module top level already binds a different
    # `read_base_url` (the Anthropic one from providers.anthropic).
    from .providers.openai_compat import read_base_url

    config = OpenAiCompatConfig.xai()
    return read_base_url(config)
|
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
"""Erreurs de l'API — traduit de error.rs."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from typing import Optional
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class ApiError(Exception):
    """Base class for all API errors."""
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@dataclass
class MissingCredentialsError(ApiError):
    """Credentials are missing for a provider."""

    provider: str
    # Environment variables that would satisfy the credential lookup.
    env_vars: tuple[str, ...]

    def __str__(self) -> str:
        options = " or ".join(self.env_vars)
        return (
            f"missing {self.provider} credentials; export {options} "
            f"before calling the {self.provider} API"
        )
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class ExpiredOAuthTokenError(ApiError):
    """The saved OAuth token is expired and no refresh token is available."""

    def __str__(self) -> str:
        return "saved OAuth token is expired and no refresh token is available"
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@dataclass
class AuthError(ApiError):
    """Authentication error."""

    message: str

    def __str__(self) -> str:
        return "auth error: {}".format(self.message)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
@dataclass
class InvalidApiKeyEnvError(ApiError):
    """A credential environment variable could not be read."""

    # The underlying exception raised while reading the variable.
    original: Exception

    def __str__(self) -> str:
        return "failed to read credential environment variable: {}".format(self.original)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
@dataclass
class HttpError(ApiError):
    """HTTP-level error (connection failure, timeout, etc.)."""

    # The underlying transport exception.
    original: Exception

    def __str__(self) -> str:
        return "http error: {}".format(self.original)
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
@dataclass
class IoError(ApiError):
    """I/O error."""

    # The underlying OS/IO exception.
    original: Exception

    def __str__(self) -> str:
        return "io error: {}".format(self.original)
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
@dataclass
class JsonError(ApiError):
    """JSON parsing error."""

    # The underlying decode exception.
    original: Exception

    def __str__(self) -> str:
        return "json error: {}".format(self.original)
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
@dataclass
class ApiResponseError(ApiError):
    """Error response returned by the API."""

    status: int
    error_type: Optional[str] = None
    message: Optional[str] = None
    # Raw response body; shown when no structured type/message is available.
    body: str = ""
    retryable: bool = False

    def __str__(self) -> str:
        if self.error_type and self.message:
            return "api returned {} ({}): {}".format(
                self.status, self.error_type, self.message
            )
        return "api returned {}: {}".format(self.status, self.body)

    def is_retryable(self) -> bool:
        """Whether retrying may succeed, as recorded at construction."""
        return self.retryable
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
@dataclass
class RetriesExhaustedError(ApiError):
    """The maximum number of retries was reached."""

    attempts: int
    # The error observed on the final attempt.
    last_error: ApiError

    def __str__(self) -> str:
        return "api failed after {} attempts: {}".format(self.attempts, self.last_error)

    def is_retryable(self) -> bool:
        # Retryability is delegated to the error that ended the retry loop.
        return _is_retryable(self.last_error)
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
@dataclass
class InvalidSseFrameError(ApiError):
    """Malformed SSE frame."""

    detail: str

    def __str__(self) -> str:
        return "invalid sse frame: {}".format(self.detail)
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
@dataclass
class BackoffOverflowError(ApiError):
    """The exponential-backoff delay computation overflowed."""

    attempt: int
    base_delay_ms: float

    def __str__(self) -> str:
        template = "retry backoff overflowed on attempt {} with base delay {}ms"
        return template.format(self.attempt, self.base_delay_ms)
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
def _is_retryable(error: ApiError) -> bool:
    """Decide whether an error is worth retrying."""
    # Unwrap RetriesExhaustedError wrappers iteratively instead of recursing;
    # the verdict comes from the innermost error.
    while isinstance(error, RetriesExhaustedError):
        error = error.last_error
    if isinstance(error, HttpError):
        # Transport failures are always considered transient.
        return True
    if isinstance(error, ApiResponseError):
        return error.retryable
    return False
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
def is_retryable_status(status: int) -> bool:
    """Check whether an HTTP status code is retryable.

    Covers request timeout/conflict/rate-limit (408, 409, 429) and the
    transient 5xx family (500, 502, 503, 504).
    """
    return status in {408, 409, 429, 500, 502, 503, 504}