promptbuilder 0.4.19__py3-none-any.whl → 0.4.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

promptbuilder/llm_client/__init__.py
@@ -2,3 +2,4 @@ from .base_client import BaseLLMClient, BaseLLMClientAsync, CachedLLMClient, Cac
  from .types import Completion, Message, Choice, Usage, Response, Candidate, Content, Part, UsageMetadata, Tool, ToolConfig, ThinkingConfig, FunctionCall, FunctionDeclaration
  from .main import get_client, get_async_client, configure, sync_existing_clients_with_global_config, get_models_list
  from .utils import DecoratorConfigs, RpmLimitConfig, RetryConfig
+ from .exceptions import APIError, ClientError, ServerError
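
The new export makes the unified error types importable directly from promptbuilder.llm_client. A minimal usage sketch, assuming the package is installed; the model name and the Content/Part field names are illustrative, not taken from this diff:

    from promptbuilder.llm_client import get_client, APIError, Content, Part

    client = get_client("openai:gpt-4o-mini")                       # illustrative model name
    messages = [Content(role="user", parts=[Part(text="Hello")])]   # field names assumed
    try:
        response = client.create(messages)
        print(response)
    except APIError as e:
        # Provider SDK errors are normalised into APIError; ClientError and ServerError
        # subclasses cover 4xx and 5xx responses.
        print(e.code, e.status, e.message)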

promptbuilder/llm_client/anthropic_client.py
@@ -1,14 +1,21 @@
  import os
- from typing import AsyncIterator, Iterator
  import base64
+ from functools import wraps
+ from typing import AsyncIterator, Iterator, Callable, ParamSpec, Awaitable
+
  from pydantic import BaseModel
- from anthropic import Anthropic, AsyncAnthropic, Stream, AsyncStream
+ from anthropic import Anthropic, AsyncAnthropic, Stream, AsyncStream, APIError as AnthropicAPIError
  from anthropic.types import RawMessageStreamEvent

  from promptbuilder.llm_client.base_client import BaseLLMClient, BaseLLMClientAsync, ResultType
  from promptbuilder.llm_client.types import Response, Content, Candidate, UsageMetadata, Part, ThinkingConfig, Tool, ToolConfig, FunctionCall, MessageDict, Model
  from promptbuilder.llm_client.config import DecoratorConfigs
  from promptbuilder.prompt_builder import PromptBuilder
+ from promptbuilder.llm_client.utils import inherited_decorator
+ from promptbuilder.llm_client.exceptions import APIError
+
+
+ P = ParamSpec("P")


  def sum_optional_ints(a: int | None, b: int | None) -> int | None:
@@ -70,6 +77,26 @@ class AnthropicDefaultMaxTokensStrategy(DefaultMaxTokensStrategy):
          return 32000


+ @inherited_decorator
+ def _error_handler(func: Callable[P, Response]) -> Callable[P, Response]:
+     """
+     Decorator to catch error from anthropic and transform it into unified one
+     """
+     @wraps(func)
+     def wrapper(*args, **kwargs):
+         try:
+             return func(*args, **kwargs)
+         except AnthropicAPIError as e:
+             code = getattr(e, "status_code", None)
+             response = getattr(e, "response", None)
+             status = getattr(response, "reason_phrase", None)
+             response_json = {
+                 "status": status,
+                 "message": e.message,
+             }
+             raise APIError(code, response_json, response)
+     return wrapper
+
  class AnthropicStreamIterator:
      def __init__(self, anthropic_iterator: Stream[RawMessageStreamEvent]):
          self._anthropic_iterator = anthropic_iterator
@@ -109,13 +136,15 @@ class AnthropicLLMClient(BaseLLMClient):
      def __init__(
          self,
          model: str,
-         api_key: str = os.getenv("ANTHROPIC_API_KEY"),
+         api_key: str | None = None,
          decorator_configs: DecoratorConfigs | None = None,
          default_thinking_config: ThinkingConfig | None = None,
          default_max_tokens: int | None = None,
          default_max_tokens_strategy: DefaultMaxTokensStrategy = AnthropicDefaultMaxTokensStrategy(),
          **kwargs,
      ):
+         if api_key is None:
+             api_key = os.getenv("ANTHROPIC_API_KEY")
          if api_key is None or not isinstance(api_key, str):
              raise ValueError("To create an anthropic llm client you need to either set the environment variable ANTHROPIC_API_KEY or pass the api_key in string format")
          super().__init__(AnthropicLLMClient.PROVIDER, model, decorator_configs=decorator_configs, default_thinking_config=default_thinking_config, default_max_tokens=default_max_tokens)
@@ -162,6 +191,7 @@ class AnthropicLLMClient(BaseLLMClient):
              anthropic_messages.append({"role": role, "content": content})
          return anthropic_messages

+     @_error_handler
      def create(
          self,
          messages: list[Content],
@@ -278,6 +308,7 @@ class AnthropicLLMClient(BaseLLMClient):
          else:
              raise ValueError(f"Unsupported result type: {result_type}")

+     @_error_handler
      def create_stream(
          self,
          messages: list[Content],
@@ -334,6 +365,26 @@ class AnthropicLLMClient(BaseLLMClient):
          return models


+ @inherited_decorator
+ def _error_handler_async(func: Callable[P, Awaitable[Response]]) -> Callable[P, Awaitable[Response]]:
+     """
+     Decorator to catch error from anthropic and transform it into unified one
+     """
+     @wraps(func)
+     async def wrapper(*args, **kwargs):
+         try:
+             return await func(*args, **kwargs)
+         except AnthropicAPIError as e:
+             code = getattr(e, "status_code", None)
+             response = getattr(e, "response", None)
+             status = getattr(response, "reason_phrase", None)
+             response_json = {
+                 "status": status,
+                 "message": e.message,
+             }
+             raise APIError(code, response_json, response)
+     return wrapper
+
  class AnthropicStreamIteratorAsync:
      def __init__(self, anthropic_iterator: AsyncStream[RawMessageStreamEvent]):
          self._anthropic_iterator = anthropic_iterator
@@ -373,13 +424,15 @@ class AnthropicLLMClientAsync(BaseLLMClientAsync):
      def __init__(
          self,
          model: str,
-         api_key: str = os.getenv("ANTHROPIC_API_KEY"),
+         api_key: str | None = None,
          decorator_configs: DecoratorConfigs | None = None,
          default_thinking_config: ThinkingConfig | None = None,
          default_max_tokens: int | None = None,
          default_max_tokens_strategy: DefaultMaxTokensStrategy = AnthropicDefaultMaxTokensStrategy(),
          **kwargs,
      ):
+         if api_key is None:
+             api_key = os.getenv("ANTHROPIC_API_KEY")
          if api_key is None or not isinstance(api_key, str):
              raise ValueError("To create an anthropic llm client you need to either set the environment variable ANTHROPIC_API_KEY or pass the api_key in string format")
          super().__init__(AnthropicLLMClientAsync.PROVIDER, model, decorator_configs=decorator_configs, default_thinking_config=default_thinking_config, default_max_tokens=default_max_tokens)
@@ -391,6 +444,7 @@ class AnthropicLLMClientAsync(BaseLLMClientAsync):
      def api_key(self) -> str:
          return self._api_key

+     @_error_handler_async
      async def create(
          self,
          messages: list[Content],
@@ -513,6 +567,7 @@ class AnthropicLLMClientAsync(BaseLLMClientAsync):
          else:
              raise ValueError(f"Unsupported result_type: {result_type}. Supported types are: None, 'json', or a Pydantic model.")

+     @_error_handler_async
      async def create_stream(
          self,
          messages: list[Content],
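
Two changes recur throughout this file: provider errors are normalised into the package's APIError by the _error_handler decorators, and the api_key default moves from the signature into the body. The latter matters because a signature default such as os.getenv("ANTHROPIC_API_KEY") is evaluated once, when the function is defined, not when the client is created. A generic sketch of the pitfall (illustrative code, not from promptbuilder):

    import os

    # A default argument is evaluated once, at definition time:
    def old_style(key: str | None = os.getenv("ANTHROPIC_API_KEY")):
        return key

    os.environ["ANTHROPIC_API_KEY"] = "set-after-import"
    print(old_style())   # still whatever the variable held when the module loaded

    # Resolving inside the body, as the clients now do, reads the current environment:
    def new_style(key: str | None = None):
        return key if key is not None else os.getenv("ANTHROPIC_API_KEY")

    print(new_style())   # "set-after-import"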

promptbuilder/llm_client/bedrock_client.py
@@ -1,24 +1,51 @@
  import os
- from typing import AsyncIterator, Iterator, Any
+ from functools import wraps
+ from typing import AsyncIterator, Iterator, Any, Callable, ParamSpec, Awaitable

  import boto3
+ from boto3.exceptions import Boto3Error
  import aioboto3
  from pydantic import BaseModel, ConfigDict
  from botocore.eventstream import EventStream
+ from botocore.exceptions import ClientError, BotoCoreError

  from promptbuilder.llm_client.base_client import BaseLLMClient, BaseLLMClientAsync, ResultType
  from promptbuilder.llm_client.types import Response, Content, Candidate, UsageMetadata, Part, ThinkingConfig, Tool, ToolConfig, FunctionCall, CustomApiKey, Model
  from promptbuilder.llm_client.config import DecoratorConfigs
  from promptbuilder.prompt_builder import PromptBuilder
+ from promptbuilder.llm_client.utils import inherited_decorator
+ from promptbuilder.llm_client.exceptions import APIError


+ P = ParamSpec("P")
+
  class BedrockApiKey(BaseModel, CustomApiKey):
      model_config = ConfigDict(frozen=True)

-     aws_access_key_id: str = os.getenv("AWS_ACCESS_KEY_ID")
-     aws_secret_access_key: str = os.getenv("AWS_SECRET_ACCESS_KEY")
-     aws_region: str = os.getenv("AWS_DEFAULT_REGION", "us-east-1")
+     aws_access_key_id: str
+     aws_secret_access_key: str
+     aws_region: str
+

+ @inherited_decorator
+ def _error_handler(func: Callable[P, Response]) -> Callable[P, Response]:
+     """
+     Decorator to catch error from boto libs and transform it into unified one
+     """
+     @wraps(func)
+     def wrapper(*args, **kwargs):
+         try:
+             return func(*args, **kwargs)
+         except (Boto3Error, BotoCoreError, ClientError) as e:
+             code = None
+             response = None
+             status = None
+             response_json = {
+                 "status": status,
+                 "message": str(e.args),
+             }
+             raise APIError(code, response_json, response)
+     return wrapper

  class BedrockStreamIterator:
      def __init__(self, bedrock_iterator: EventStream):
@@ -51,13 +78,19 @@ class BedrockLLMClient(BaseLLMClient):
      def __init__(
          self,
          model: str,
-         api_key: BedrockApiKey = BedrockApiKey(),
+         api_key: BedrockApiKey | None = None,
          decorator_configs: DecoratorConfigs | None = None,
          default_thinking_config: ThinkingConfig | None = None,
          default_max_tokens: int | None = None,
          **kwargs,
      ):
-         if api_key is None or not isinstance(api_key, BedrockApiKey):
+         if api_key is None:
+             api_key = BedrockApiKey(
+                 aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
+                 aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
+                 aws_region=os.getenv("AWS_DEFAULT_REGION", "us-east-1"),
+             )
+         if not isinstance(api_key, BedrockApiKey):
              raise ValueError(
                  "To create a bedrock llm client you need to either set the environment variables "
                  "AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and optional AWS_DEFAULT_REGION or pass the api_key as BedrockApiKey instance"
@@ -69,6 +102,7 @@ class BedrockLLMClient(BaseLLMClient):
      def api_key(self) -> BedrockApiKey:
          return self._api_key

+     @_error_handler
      def create(
          self,
          messages: list[Content],
@@ -226,7 +260,8 @@ class BedrockLLMClient(BaseLLMClient):
              ),
              parsed=parsed_pydantic,
          )
-
+
+     @_error_handler
      def create_stream(
          self,
          messages: list[Content],
@@ -278,6 +313,27 @@ class BedrockLLMClient(BaseLLMClient):
            ))
          return models

+
+ @inherited_decorator
+ def _error_handler_async(func: Callable[P, Awaitable[Response]]) -> Callable[P, Awaitable[Response]]:
+     """
+     Decorator to catch error from boto libs and transform it into unified one
+     """
+     @wraps(func)
+     async def wrapper(*args, **kwargs):
+         try:
+             return await func(*args, **kwargs)
+         except (Boto3Error, BotoCoreError, ClientError) as e:
+             code = None
+             response = None
+             status = None
+             response_json = {
+                 "status": status,
+                 "message": str(e.args),
+             }
+             raise APIError(code, response_json, response)
+     return wrapper
+
  class BedrockStreamIteratorAsync:
      def __init__(self, aioboto_session: aioboto3.Session, **bedrock_kwargs):
          self._aioboto_session = aioboto_session
@@ -313,13 +369,19 @@ class BedrockLLMClientAsync(BaseLLMClientAsync):
      def __init__(
          self,
          model: str,
-         api_key: BedrockApiKey = BedrockApiKey(),
+         api_key: BedrockApiKey | None = None,
          decorator_configs: DecoratorConfigs | None = None,
          default_thinking_config: ThinkingConfig | None = None,
          default_max_tokens: int | None = None,
          **kwargs,
      ):
-         if api_key is None or not isinstance(api_key, BedrockApiKey):
+         if api_key is None:
+             api_key = BedrockApiKey(
+                 aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
+                 aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
+                 aws_region=os.getenv("AWS_DEFAULT_REGION", "us-east-1"),
+             )
+         if not isinstance(api_key, BedrockApiKey):
              raise ValueError(
                  "To create a bedrock llm client you need to either set the environment variables "
                  "AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and optional AWS_DEFAULT_REGION or pass the api_key as BedrockApiKey instance"
@@ -336,6 +398,7 @@ class BedrockLLMClientAsync(BaseLLMClientAsync):
      def api_key(self) -> BedrockApiKey:
          return self._api_key

+     @_error_handler_async
      async def create(
          self,
          messages: list[Content],
@@ -487,7 +550,8 @@ class BedrockLLMClientAsync(BaseLLMClientAsync):
              ),
              parsed=parsed_pydantic,
          )
-
+
+     @_error_handler_async
      async def create_stream(
          self,
          messages: list[Content],
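
With the environment-variable defaults removed from BedrockApiKey, credentials are either passed explicitly or assembled from the environment inside the client constructor. A hedged sketch; the model id and all key values are placeholders:

    from promptbuilder.llm_client.bedrock_client import BedrockApiKey, BedrockLLMClient

    # Explicit credentials:
    key = BedrockApiKey(
        aws_access_key_id="AKIA...",
        aws_secret_access_key="...",
        aws_region="us-east-1",
    )
    client = BedrockLLMClient("anthropic.claude-3-haiku-20240307-v1:0", api_key=key)

    # Or rely on the environment: with api_key omitted, the client reads
    # AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY / AWS_DEFAULT_REGION at construction time.
    client = BedrockLLMClient("anthropic.claude-3-haiku-20240307-v1:0")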

promptbuilder/llm_client/exceptions.py (new file)
@@ -0,0 +1,128 @@
+ import json
+ from typing import Any
+
+ import httpx
+ import aiohttp
+
+
+ class APIError(Exception):
+     """General errors raised by the llm clients"""
+     code: int | None = None
+     status: str | None = None
+     message: str | None = None
+     response: httpx.Response | aiohttp.ClientResponse | None = None
+
+     def __init__(self, code: int | None = None, response_json: dict[str, Any] = {}, response: httpx.Response | aiohttp.ClientResponse | None = None):
+         self.response = response
+         self.details = response_json
+         self.message = self._get_message(response_json)
+         self.status = self._get_status(response_json)
+         self.code = code if code else self._get_code(response_json)
+
+         super().__init__(f"{self.code} {self.status}. {self.details}")
+
+     def _get_status(self, response_json: dict[str, Any]) -> Any:
+         return response_json.get(
+             "status", response_json.get("error", {}).get("status", None)
+         )
+
+     def _get_message(self, response_json: dict[str, Any]) -> Any:
+         return response_json.get(
+             "message", response_json.get("error", {}).get("message", None)
+         )
+
+     def _get_code(self, response_json: dict[str, Any]) -> Any:
+         return response_json.get(
+             "code", response_json.get("error", {}).get("code", None)
+         )
+
+     def _to_replay_record(self) -> dict[str, Any]:
+         """Returns a dictionary representation of the error for replay recording."""
+         return {
+             "error": {
+                 "code": self.code,
+                 "message": self.message,
+                 "status": self.status,
+             }
+         }
+
+     @classmethod
+     def raise_for_response(cls, response: httpx.Response):
+         """Raises an error with detailed error message if the response has an error status."""
+         if response.status_code == 200:
+             return
+
+         if isinstance(response, httpx.Response):
+             try:
+                 response.read()
+                 response_json = response.json()
+             except json.decoder.JSONDecodeError:
+                 message = response.text
+                 response_json = {
+                     "message": message,
+                     "status": response.reason_phrase,
+                 }
+         else:
+             response_json = response.body_segments[0].get('error', {})
+
+         status_code = response.status_code
+         if 400 <= status_code < 500:
+             raise ClientError(status_code, response_json, response)
+         elif 500 <= status_code < 600:
+             raise ServerError(status_code, response_json, response)
+         else:
+             raise cls(status_code, response_json, response)
+
+     @classmethod
+     async def raise_for_async_response(cls, response: httpx.Response | aiohttp.ClientResponse):
+         """Raises an error with detailed error message if the response has an error status."""
+         status_code = 0
+         response_json = None
+         if isinstance(response, httpx.Response):
+             if response.status_code == 200:
+                 return
+             try:
+                 await response.aread()
+                 response_json = response.json()
+             except json.decoder.JSONDecodeError:
+                 message = response.text
+                 response_json = {
+                     "message": message,
+                     "status": response.reason_phrase,
+                 }
+             status_code = response.status_code
+         else:
+             try:
+                 if isinstance(response, aiohttp.ClientResponse):
+                     if response.status == 200:
+                         return
+                     try:
+                         response_json = await response.json()
+                     except aiohttp.client_exceptions.ContentTypeError:
+                         message = await response.text()
+                         response_json = {
+                             "message": message,
+                             "status": response.reason,
+                         }
+                     status_code = response.status
+                 else:
+                     response_json = response.body_segments[0].get("error", {})
+             except ImportError:
+                 response_json = response.body_segments[0].get("error", {})
+
+         if 400 <= status_code < 500:
+             raise ClientError(status_code, response_json, response)
+         elif 500 <= status_code < 600:
+             raise ServerError(status_code, response_json, response)
+         else:
+             raise cls(status_code, response_json, response)
+
+
+ class ClientError(APIError):
+     """Client error raised by the llm clients"""
+     pass
+
+
+ class ServerError(APIError):
+     """Server error raised by the llm clients"""
+     pass
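
raise_for_response maps HTTP status ranges onto this hierarchy: 4xx raises ClientError, 5xx raises ServerError, anything else falls back to APIError. A small sketch using a synthetic httpx.Response, purely for illustration:

    import httpx
    from promptbuilder.llm_client.exceptions import APIError, ClientError

    # Build a fake 404 response just to exercise the helper.
    resp = httpx.Response(
        status_code=404,
        json={"error": {"code": 404, "status": "NOT_FOUND", "message": "model not found"}},
    )
    try:
        APIError.raise_for_response(resp)
    except ClientError as e:                  # 4xx -> ClientError, 5xx -> ServerError
        print(e.code, e.status, e.message)    # 404 NOT_FOUND model not found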

promptbuilder/llm_client/google_client.py
@@ -1,12 +1,42 @@
  import os
- from typing import AsyncIterator, Iterator
+ from functools import wraps
+ from typing import AsyncIterator, Iterator, Callable, ParamSpec, Awaitable

  from pydantic import BaseModel
+ from tenacity import RetryError
  from google.genai import Client, types

  from promptbuilder.llm_client.base_client import BaseLLMClient, BaseLLMClientAsync, ResultType
  from promptbuilder.llm_client.types import Response, Content, Part, ThinkingConfig, Tool, ToolConfig, Model
  from promptbuilder.llm_client.config import DecoratorConfigs
+ from promptbuilder.llm_client.utils import inherited_decorator
+ from promptbuilder.llm_client.exceptions import APIError
+
+
+ P = ParamSpec("P")
+
+
+ @inherited_decorator
+ def _error_handler(func: Callable[P, Response]) -> Callable[P, Response]:
+     """
+     Decorator to catch error from google.genai and transform it into unified one
+     """
+     @wraps(func)
+     def wrapper(*args, **kwargs):
+         try:
+             return func(*args, **kwargs)
+         except RetryError as retry_error:
+             e = retry_error.last_attempt._exception
+             if e is None:
+                 raise APIError()
+             code = e.code
+             response_json = {
+                 "status": e.status,
+                 "message": e.message,
+             }
+             response = e.response
+             raise APIError(code, response_json, response)
+     return wrapper


  class GoogleLLMClient(BaseLLMClient):
@@ -15,12 +45,14 @@ class GoogleLLMClient(BaseLLMClient):
      def __init__(
          self,
          model: str,
-         api_key: str = os.getenv("GOOGLE_API_KEY"),
+         api_key: str | None = None,
          decorator_configs: DecoratorConfigs | None = None,
          default_thinking_config: ThinkingConfig | None = None,
          default_max_tokens: int | None = None,
          **kwargs,
      ):
+         if api_key is None:
+             api_key = os.getenv("GOOGLE_API_KEY")
          if api_key is None or not isinstance(api_key, str):
              raise ValueError("To create a google llm client you need to either set the environment variable GOOGLE_API_KEY or pass the api_key in string format")
          super().__init__(GoogleLLMClient.PROVIDER, model, decorator_configs=decorator_configs, default_thinking_config=default_thinking_config, default_max_tokens=default_max_tokens)
@@ -53,7 +85,8 @@ class GoogleLLMClient(BaseLLMClient):
              )
              new_messages.append(new_message)
          return new_messages
-
+
+     @_error_handler
      def create(
          self,
          messages: list[Content],
@@ -103,7 +136,8 @@ class GoogleLLMClient(BaseLLMClient):
              )
          else:
              raise ValueError(f"Unsupported result_type: {result_type}. Supported types are: None, 'json', or a Pydantic model.")
-
+
+     @_error_handler
      def create_stream(
          self,
          messages: list[Content],
@@ -159,18 +193,42 @@ class GoogleLLMClient(BaseLLMClient):
          return models


+ @inherited_decorator
+ def _error_handler_async(func: Callable[P, Awaitable[Response]]) -> Callable[P, Awaitable[Response]]:
+     """
+     Decorator to catch error from google.genai and transform it into unified one
+     """
+     @wraps(func)
+     async def wrapper(*args, **kwargs):
+         try:
+             return await func(*args, **kwargs)
+         except RetryError as retry_error:
+             e = retry_error.last_attempt._exception
+             if e is None:
+                 raise APIError()
+             code = e.code
+             response_json = {
+                 "status": e.status,
+                 "message": e.message,
+             }
+             response = e.response
+             raise APIError(code, response_json, response)
+     return wrapper
+
  class GoogleLLMClientAsync(BaseLLMClientAsync):
      PROVIDER: str = "google"

      def __init__(
          self,
          model: str,
-         api_key: str = os.getenv("GOOGLE_API_KEY"),
+         api_key: str | None = None,
          decorator_configs: DecoratorConfigs | None = None,
          default_thinking_config: ThinkingConfig | None = None,
          default_max_tokens: int | None = None,
          **kwargs,
      ):
+         if api_key is None:
+             api_key = os.getenv("GOOGLE_API_KEY")
          if api_key is None or not isinstance(api_key, str):
              raise ValueError("To create a google llm client you need to either set the environment variable GOOGLE_API_KEY or pass the api_key in string format")
          super().__init__(GoogleLLMClientAsync.PROVIDER, model, decorator_configs=decorator_configs, default_thinking_config=default_thinking_config, default_max_tokens=default_max_tokens)
@@ -181,6 +239,7 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
      def api_key(self) -> str:
          return self._api_key

+     @_error_handler_async
      async def create(
          self,
          messages: list[Content],
@@ -222,7 +281,8 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
              )
          else:
              raise ValueError(f"Unsupported result_type: {result_type}. Supported types are: None, 'json', or a Pydantic model.")
-
+
+     @_error_handler_async
      async def create_stream(
          self,
          messages: list[Content],
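
Unlike the other providers, the google handlers catch tenacity's RetryError and unwrap the final attempt, which suggests the underlying google.genai calls are wrapped in a tenacity retry. A generic sketch of that unwrapping (illustrative code, using the public last_attempt.exception() accessor rather than the private _exception attribute seen above):

    from tenacity import retry, stop_after_attempt, RetryError

    @retry(stop=stop_after_attempt(2), reraise=False)
    def flaky() -> None:
        raise RuntimeError("quota exceeded")   # stand-in for a google.genai error

    try:
        flaky()
    except RetryError as retry_error:
        # tenacity wraps the last failure; the original exception lives on the final attempt
        original = retry_error.last_attempt.exception()
        print(type(original).__name__, original)   # RuntimeError quota exceeded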

promptbuilder/llm_client/main.py
@@ -40,10 +40,7 @@ def get_client(
      provider, model = full_model_name.split(":", 1)
      if provider in provider_to_client_class:
          client_class = provider_to_client_class[provider]
-         if api_key is None:
-             client = client_class(model, **kwargs)
-         else:
-             client = client_class(model, api_key, **kwargs)
+         client = client_class(model, api_key, **kwargs)
      else:
          if api_key is None:
              raise ValueError(f"You should directly provide api_key for this provider: {provider}")
@@ -87,10 +84,7 @@ def get_async_client(
      provider, model = full_model_name.split(":", 1)
      if provider in provider_to_client_class:
          client_class = provider_to_client_class[provider]
-         if api_key is None:
-             client = client_class(model, **kwargs)
-         else:
-             client = client_class(model, api_key, **kwargs)
+         client = client_class(model, api_key, **kwargs)
      else:
          if api_key is None:
              raise ValueError(f"You should directly provide api_key for this provider: {provider}")

promptbuilder/llm_client/openai_client.py
@@ -1,17 +1,43 @@
  import os
  import json
  import base64
- from typing import AsyncIterator, Iterator
+ from functools import wraps
+ from typing import AsyncIterator, Iterator, Callable, ParamSpec, Awaitable

  from pydantic import BaseModel
- from openai import OpenAI, AsyncOpenAI, Stream, AsyncStream
+ from openai import OpenAI, AsyncOpenAI, Stream, AsyncStream, APIError as OpenAIAPIError
  from openai.types.responses import ResponseStreamEvent

  from promptbuilder.llm_client.base_client import BaseLLMClient, BaseLLMClientAsync, ResultType
  from promptbuilder.llm_client.types import Response, Content, Candidate, UsageMetadata, Part, ThinkingConfig, Tool, ToolConfig, FunctionCall, MessageDict, Model
  from promptbuilder.llm_client.config import DecoratorConfigs
+ from promptbuilder.llm_client.utils import inherited_decorator
+ from promptbuilder.llm_client.exceptions import APIError


+ P = ParamSpec("P")
+
+
+ @inherited_decorator
+ def _error_handler(func: Callable[P, Response]) -> Callable[P, Response]:
+     """
+     Decorator to catch error from openai and transform it into unified one
+     """
+     @wraps(func)
+     def wrapper(*args, **kwargs):
+         try:
+             return func(*args, **kwargs)
+         except OpenAIAPIError as e:
+             code = getattr(e, "status_code", None) or e.code
+             response = getattr(e, "response", None)
+             status = getattr(response, "reason_phrase", None)
+             response_json = {
+                 "status": status,
+                 "message": e.message,
+             }
+             raise APIError(code, response_json, response)
+     return wrapper
+
  class OpenaiStreamIterator:
      def __init__(self, openai_iterator: Stream[ResponseStreamEvent]):
          self._openai_iterator = openai_iterator
@@ -43,12 +69,14 @@ class OpenaiLLMClient(BaseLLMClient):
      def __init__(
          self,
          model: str,
-         api_key: str = os.getenv("OPENAI_API_KEY"),
+         api_key: str | None = None,
          decorator_configs: DecoratorConfigs | None = None,
          default_thinking_config: ThinkingConfig | None = None,
          default_max_tokens: int | None = None,
          **kwargs,
      ):
+         if api_key is None:
+             api_key = os.getenv("OPENAI_API_KEY")
          if api_key is None or not isinstance(api_key, str):
              raise ValueError("To create an openai llm client you need to either set the environment variable OPENAI_API_KEY or pass the api_key in string format")
          super().__init__(OpenaiLLMClient.PROVIDER, model, decorator_configs=decorator_configs, default_thinking_config=default_thinking_config, default_max_tokens=default_max_tokens)
@@ -113,6 +141,7 @@ class OpenaiLLMClient(BaseLLMClient):
              openai_thinking_config["reasoning"] = {"effort": "high"}
          return openai_thinking_config

+     @_error_handler
      def create(
          self,
          messages: list[Content],
@@ -223,7 +252,8 @@ class OpenaiLLMClient(BaseLLMClient):
              )
          else:
              raise ValueError(f"Unsupported result type: {result_type}. Supported types are None, 'json', or a Pydantic model class.")
-
+
+     @_error_handler
      def create_stream(
          self,
          messages: list[Content],
@@ -277,6 +307,26 @@ class OpenaiLLMClient(BaseLLMClient):
          return models


+ @inherited_decorator
+ def _error_handler_async(func: Callable[P, Awaitable[Response]]) -> Callable[P, Awaitable[Response]]:
+     """
+     Decorator to catch error from openai and transform it into unified one
+     """
+     @wraps(func)
+     async def wrapper(*args, **kwargs):
+         try:
+             return await func(*args, **kwargs)
+         except OpenAIAPIError as e:
+             code = getattr(e, "status_code", None) or e.code
+             response = getattr(e, "response", None)
+             status = getattr(response, "reason_phrase", None)
+             response_json = {
+                 "status": status,
+                 "message": e.message,
+             }
+             raise APIError(code, response_json, response)
+     return wrapper
+
  class OpenaiStreamIteratorAsync:
      def __init__(self, openai_iterator: AsyncStream[ResponseStreamEvent]):
          self._openai_iterator = openai_iterator
@@ -308,12 +358,14 @@ class OpenaiLLMClientAsync(BaseLLMClientAsync):
      def __init__(
          self,
          model: str,
-         api_key: str = os.getenv("OPENAI_API_KEY"),
+         api_key: str | None = None,
          decorator_configs: DecoratorConfigs | None = None,
          default_thinking_config: ThinkingConfig | None = None,
          default_max_tokens: int | None = None,
          **kwargs,
      ):
+         if api_key is None:
+             api_key = os.getenv("OPENAI_API_KEY")
          if api_key is None or not isinstance(api_key, str):
              raise ValueError("To create an openai llm client you need to either set the environment variable OPENAI_API_KEY or pass the api_key in string format")
          super().__init__(OpenaiLLMClientAsync.PROVIDER, model, decorator_configs=decorator_configs, default_thinking_config=default_thinking_config, default_max_tokens=default_max_tokens)
@@ -324,6 +376,7 @@ class OpenaiLLMClientAsync(BaseLLMClientAsync):
      def api_key(self) -> str:
          return self._api_key

+     @_error_handler_async
      async def create(
          self,
          messages: list[Content],
@@ -441,7 +494,8 @@ class OpenaiLLMClientAsync(BaseLLMClientAsync):
              )
          else:
              raise ValueError(f"Unsupported result_type: {result_type}. Supported types are: None, 'json', or a Pydantic model.")
-
+
+     @_error_handler_async
      async def create_stream(
          self,
          messages: list[Content],
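
The async clients get the same treatment through _error_handler_async, so awaited calls surface the same unified exception. A small hedged sketch of catching it from async code; the model name and Content/Part field names are illustrative:

    import asyncio
    from promptbuilder.llm_client import get_async_client, APIError, Content, Part

    async def main() -> None:
        client = get_async_client("openai:gpt-4o-mini")
        messages = [Content(role="user", parts=[Part(text="Hello")])]
        try:
            response = await client.create(messages)
            print(response)
        except APIError as e:
            # OpenAI SDK errors are converted into the unified APIError by the decorator
            print(e.code, e.status, e.message)

    asyncio.run(main())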

promptbuilder-0.4.21.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: promptbuilder
- Version: 0.4.19
+ Version: 0.4.21
  Summary: Library for building prompts for LLMs
  Home-page: https://github.com/kapulkin/promptbuilder
  Author: Kapulkin Stanislav

promptbuilder-0.4.21.dist-info/RECORD
@@ -6,20 +6,21 @@ promptbuilder/agent/agent.py,sha256=dVu251C1r9w5LS2P_shsIRH9tFz1Jq93MDv3Uu41_4E,
  promptbuilder/agent/context.py,sha256=CVw715vFrhfvddQmRNy4A1U87GsZyIKj9Xu4SCidbc0,1120
  promptbuilder/agent/tool.py,sha256=VDbIHK3_Q62Ei7hwLF7nIgHq-PTMKnv1NSjHpDYkUZE,2651
  promptbuilder/agent/utils.py,sha256=vTkphKw04v_QDIJtoB2JKK0RGY6iI1t_0LbmuStunzI,356
- promptbuilder/llm_client/__init__.py,sha256=2tPVYqwNdwTRdIg4Pde6Nc259FJvy70gjEj1N2oqNrc,458
+ promptbuilder/llm_client/__init__.py,sha256=wJ33cnRtZX_YPsbcGxEu3SEZMOhPX7-fHI59MEPUe7I,517
  promptbuilder/llm_client/aisuite_client.py,sha256=aMqg05zefzck9Lz7pm7jZoKFdzr_ymFYhrAjZtzdHlQ,15561
- promptbuilder/llm_client/anthropic_client.py,sha256=JeTVC26ahuJJT4G_3Bsoc4TqLzVDPuJpJiCRxTALnqA,26146
+ promptbuilder/llm_client/anthropic_client.py,sha256=vWuyFZL_LohOE0UYjB1-zTr4tJZMUcGk8H10gpjzdkk,28074
  promptbuilder/llm_client/base_client.py,sha256=GS-Qb20WtZnljmEUD2ibhTHDet7exoyhQ0_mGNAEKlg,24219
- promptbuilder/llm_client/bedrock_client.py,sha256=W4wFW7Vbv-nsT2ReyhJ4YIPSTXxE_4S83352vJDSmDk,25772
+ promptbuilder/llm_client/bedrock_client.py,sha256=e9vUClbybQb32028oDBW6IbyPYqj1ZSSv9y36ZqUWxM,27941
  promptbuilder/llm_client/config.py,sha256=exQEm35wp7lK5SfXNpN5H9VZEb2LVa4pyZ-cxGt1U-U,1124
- promptbuilder/llm_client/google_client.py,sha256=heyeACt_0bVP3p4pCQeWR92MhCsyNk844kWJ_0MVTfg,9830
+ promptbuilder/llm_client/exceptions.py,sha256=t-X7r_a8B1jNu8eEavde1jXu5dz97yV3IG4YHOtgh0Y,4836
+ promptbuilder/llm_client/google_client.py,sha256=y1_CFXBijUiRTyAJsh-8a6CGIwwlZBskO5kWqVWZcPo,11780
  promptbuilder/llm_client/logfire_decorators.py,sha256=un_QnIekypOEcqTZ5v1y9pwijGnF95xwnwKO5rFSHVY,9667
- promptbuilder/llm_client/main.py,sha256=k4JTyKq2atNyFtI1bjjqXEnGSEugj4xk0AJEvHJiMig,8310
- promptbuilder/llm_client/openai_client.py,sha256=5yvjp-Zzp4JsBC9_ffSb1A9-iMG4Lu2B2et2CdtK9R0,22864
+ promptbuilder/llm_client/main.py,sha256=5r_MhKVTD4cS90AHR89JJRKiWYBk35Y3JvhvmOxkYHc,8110
+ promptbuilder/llm_client/openai_client.py,sha256=GdyTbUPsbACXZYF0BnCRyLVw24_WM1R_MMr6pDpiiV4,24787
  promptbuilder/llm_client/types.py,sha256=2E-aPRb5uAkLFJocmjF1Lh2aQRq9r8a5JRIw-duHfjA,7460
  promptbuilder/llm_client/utils.py,sha256=79lvSppjrrItHB5MIozbp_5Oq7TsOK4Qzt9Ae3XMLFw,7624
- promptbuilder-0.4.19.dist-info/licenses/LICENSE,sha256=fqXmInzgsvEOIaKSBgcrwKyYCGYF0MKErJ0YivtODcc,1096
- promptbuilder-0.4.19.dist-info/METADATA,sha256=H7BlzTYhhJi7NGunmjiYhaUqWAhWS-6ELC682S14VKY,3738
- promptbuilder-0.4.19.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- promptbuilder-0.4.19.dist-info/top_level.txt,sha256=UBVcYn4UgrPy3O3fmmnPEU_kieuplBMgheetIMei4EI,14
- promptbuilder-0.4.19.dist-info/RECORD,,
+ promptbuilder-0.4.21.dist-info/licenses/LICENSE,sha256=fqXmInzgsvEOIaKSBgcrwKyYCGYF0MKErJ0YivtODcc,1096
+ promptbuilder-0.4.21.dist-info/METADATA,sha256=A44iObCMOJxcKjF6fYxU-O-QugjMPrgkJJJDu2Rm4Vc,3738
+ promptbuilder-0.4.21.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ promptbuilder-0.4.21.dist-info/top_level.txt,sha256=UBVcYn4UgrPy3O3fmmnPEU_kieuplBMgheetIMei4EI,14
+ promptbuilder-0.4.21.dist-info/RECORD,,