promptbuilder 0.4.18__py3-none-any.whl → 0.4.20__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, exactly as they appear in their public registry. It is provided for informational purposes only.
@@ -0,0 +1,206 @@
+ import os
+ import asyncio
+ from copy import deepcopy
+ from typing import Literal, get_args
+
+ import numpy as np
+ from google import genai
+ from google.genai.types import EmbedContentConfig, EmbedContentResponse
+ from openai import AsyncOpenAI
+
+ import promptbuilder.llm_client.utils as utils
+
+
+ type EMBS_TASK_TYPE = Literal["RETRIEVAL_QUERY", "RETRIEVAL_DOCUMENT", "SEMANTIC_SIMILARITY"]
+ type EMBEDDING = list[float]
+ EMBS_TASKS = get_args(EMBS_TASK_TYPE)
+
+
+ def normalize_embeddings(embs: list[list[float]] | list[float]) -> list[list[float]] | list[float]:
+     embs_np = np.array(embs)
+     emb_norms = np.sqrt(np.sum(embs_np * embs_np, axis=-1, keepdims=True))
+     embs_np = embs_np / emb_norms
+     return embs_np.tolist()
+
+
+ class EmbeddingsApi(utils.InheritDecoratorsMixin):
+     available_model_dims: dict[str, list[int]] = {}
+     default_model_dim: dict[str, int] = {}
+     model_name_prefix: str = ""
+
+     def __init__(self, model_name: str, embs_dim: int | None = None, *args, retry_times: int = 0, retry_delay: float = 0, **kwargs):
+         if model_name not in self.available_model_dims:
+             raise ValueError(f"Model {model_name} is not supported.")
+         if embs_dim is None:
+             embs_dim = self.default_model_dim[model_name]
+         else:
+             if embs_dim not in self.available_model_dims[model_name]:
+                 raise ValueError(f"Model {model_name} does not support embedding dimension {embs_dim}.")
+
+         self._model_name = model_name
+         self._embs_dim = embs_dim
+         self._retry_times = retry_times
+         self._retry_delay = retry_delay
+
+     @property
+     def embeddings_dim(self) -> int:
+         return self._embs_dim
+
+     @property
+     def model_name(self) -> str:
+         return self.model_name_prefix + self._model_name
+
+     @utils.retry_cls_async
+     async def get_embeddings(
+         self,
+         texts: list[str] | str,
+         task_types: list[EMBS_TASK_TYPE] | EMBS_TASK_TYPE = ["SEMANTIC_SIMILARITY"],
+         normalize: bool = True,
+     ) -> dict[EMBS_TASK_TYPE, list[EMBEDDING]] | dict[EMBS_TASK_TYPE, EMBEDDING] | list[EMBEDDING] | EMBEDDING:
+         pass
+
+
+ class GoogleEmbsApi(EmbeddingsApi):
+     available_model_dims: dict[str, list[int]] = {"text-embedding-004": [768]}
+     default_model_dim: dict[str, int] = {"text-embedding-004": 768}
+     model_name_prefix: str = "google:"
+
+     def __init__(
+         self,
+         model_name: str = "text-embedding-004",
+         embs_dim: int | None = None,
+         *,
+         retry_times: int = 0,
+         retry_delay: float = 0,
+         **kwargs,
+     ):
+         super().__init__(model_name, embs_dim, retry_times=retry_times, retry_delay=retry_delay)
+         self._client = genai.Client(api_key=os.getenv("GOOGLEAI_API_KEY"))
+         self._rpm_limit = 145
+
+     async def get_embeddings(
+         self,
+         texts: list[str] | str,
+         task_types: list[EMBS_TASK_TYPE] | EMBS_TASK_TYPE = ["SEMANTIC_SIMILARITY"],
+         normalize: bool = True,
+         **kwargs,
+     ) -> dict[EMBS_TASK_TYPE, list[EMBEDDING]] | dict[EMBS_TASK_TYPE, EMBEDDING] | list[EMBEDDING] | EMBEDDING:
+         batch_size = 10
+
+         if isinstance(task_types, list):
+             task_types = list(set(task_types))
+             embeddings = await asyncio.gather(*[self.get_embeddings(texts, task_type, normalize) for task_type in task_types])
+             response = {task_type: embs for task_type, embs in zip(task_types, embeddings)}
+             return response
+
+         task_type = task_types
+         if isinstance(texts, str):
+             response = await self._api_request(
+                 model=self._model_name,
+                 contents=texts,
+                 config=EmbedContentConfig(task_type=task_type),
+             )
+             if normalize:
+                 return normalize_embeddings(response.embeddings[0].values)
+             else:
+                 return response.embeddings[0].values
+         elif isinstance(texts, list):
+             batches_num = len(texts) // batch_size + 1
+             result_embeddings: list[list[float]] = []
+
+             for i in range(batches_num):
+                 first_idx = i * batch_size
+                 last_idx = (i + 1) * batch_size
+                 batch = texts[first_idx: last_idx]
+                 if len(batch) > 0:
+                     response = await self._api_request(
+                         model=self._model_name,
+                         contents=batch,
+                         config=EmbedContentConfig(task_type=task_type),
+                     )
+                     result_embeddings += [embeddings.values for embeddings in response.embeddings]
+
+             if normalize:
+                 return normalize_embeddings(result_embeddings)
+             else:
+                 return result_embeddings
+         else:
+             raise ValueError("'texts' must be a string or a list of strings.")
+
+     @utils.rpm_limit_cls_async
+     async def _api_request(self, model: str, contents: str | list[str], config: EmbedContentConfig) -> EmbedContentResponse:
+         return await self._client.aio.models.embed_content(
+             model=model,
+             contents=contents,
+             config=config,
+         )
+
+
+ class OpenAIEmbsApi(EmbeddingsApi):
+     available_model_dims: dict[str, list[int]] = {
+         "text-embedding-3-small": [512, 1536],
+         "text-embedding-3-large": [1024, 3072],
+     }
+     default_model_dim: dict[str, int] = {
+         "text-embedding-3-small": 1536,
+         "text-embedding-3-large": 3072,
+     }
+     model_name_prefix: str = "openai:"
+
+     def __init__(
+         self,
+         model_name: str = "text-embedding-3-small",
+         embs_dim: int | None = None,
+         *,
+         retry_times: int = 0,
+         retry_delay: float = 0,
+         **kwargs,
+     ):
+         super().__init__(model_name, embs_dim, retry_times=retry_times, retry_delay=retry_delay)
+         self._client = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
+
+     async def get_embeddings(
+         self,
+         texts: list[str] | str,
+         task_types: list[EMBS_TASK_TYPE] | EMBS_TASK_TYPE = ["SEMANTIC_SIMILARITY"],
+         normalize: bool = True,
+         **kwargs,
+     ) -> dict[EMBS_TASK_TYPE, list[EMBEDDING]] | dict[EMBS_TASK_TYPE, EMBEDDING] | list[EMBEDDING] | EMBEDDING:
+         if isinstance(task_types, list):
+             task_types = list(set(task_types))
+             embeddings = await self.get_embeddings(texts, "SEMANTIC_SIMILARITY", normalize)
+             response = {task_type: deepcopy(embeddings) for task_type in task_types}
+             return response
+
+         if isinstance(texts, str):
+             response = await self._client.embeddings.create(
+                 input=texts,
+                 model=self._model_name,
+                 dimensions=self._embs_dim,
+             )
+             if normalize:
+                 return normalize_embeddings(response.data[0].embedding)
+             else:
+                 return response.data[0].embedding
+         elif isinstance(texts, list):
+             batches_num = len(texts) // 100 + 1
+             result_embeddings = []
+
+             for i in range(batches_num):
+                 first_idx = i * 100
+                 last_idx = (i + 1) * 100
+                 batch = texts[first_idx: last_idx]
+                 if len(batch) > 0:
+                     response = await self._client.embeddings.create(
+ input=texts,
196
+ model=self._model_name,
197
+ dimensions=self._embs_dim,
198
+ )
199
+ result_embeddings += [emb.embedding for emb in response.data]
200
+
201
+ if normalize:
202
+ return normalize_embeddings(result_embeddings)
203
+ else:
204
+ return result_embeddings
205
+ else:
206
+ raise ValueError("'texts' must be a string or a list of strings.")
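The new promptbuilder/embeddings.py module above exposes an async, provider-prefixed embeddings API. A minimal usage sketch follows, assuming only the class names, constructor arguments, and get_embeddings signature shown in the hunk plus an OPENAI_API_KEY in the environment; it is not taken from the package documentation.

import asyncio

from promptbuilder.embeddings import OpenAIEmbsApi


async def main():
    # 512-dimensional embeddings from text-embedding-3-small; the client reads
    # OPENAI_API_KEY from the environment, as in the constructor above.
    api = OpenAIEmbsApi(model_name="text-embedding-3-small", embs_dim=512)

    # A single string returns one embedding; a list returns one embedding per text.
    # Results are L2-normalized by default (normalize=True).
    query_emb = await api.get_embeddings("what does promptbuilder do?")
    doc_embs = await api.get_embeddings(["first document", "second document"])

    print(api.model_name, api.embeddings_dim, len(query_emb), len(doc_embs))


asyncio.run(main())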
@@ -2,3 +2,4 @@ from .base_client import BaseLLMClient, BaseLLMClientAsync, CachedLLMClient, Cac
  from .types import Completion, Message, Choice, Usage, Response, Candidate, Content, Part, UsageMetadata, Tool, ToolConfig, ThinkingConfig, FunctionCall, FunctionDeclaration
  from .main import get_client, get_async_client, configure, sync_existing_clients_with_global_config, get_models_list
  from .utils import DecoratorConfigs, RpmLimitConfig, RetryConfig
+ from .exceptions import APIError, ClientError, ServerError
@@ -1,14 +1,21 @@
  import os
- from typing import AsyncIterator, Iterator
  import base64
+ from functools import wraps
+ from typing import AsyncIterator, Iterator, Callable, ParamSpec, Awaitable
+
  from pydantic import BaseModel
- from anthropic import Anthropic, AsyncAnthropic, Stream, AsyncStream
+ from anthropic import Anthropic, AsyncAnthropic, Stream, AsyncStream, APIError as AnthropicAPIError
  from anthropic.types import RawMessageStreamEvent

  from promptbuilder.llm_client.base_client import BaseLLMClient, BaseLLMClientAsync, ResultType
  from promptbuilder.llm_client.types import Response, Content, Candidate, UsageMetadata, Part, ThinkingConfig, Tool, ToolConfig, FunctionCall, MessageDict, Model
  from promptbuilder.llm_client.config import DecoratorConfigs
  from promptbuilder.prompt_builder import PromptBuilder
+ from promptbuilder.llm_client.utils import inherited_decorator
+ from promptbuilder.llm_client.exceptions import APIError
+
+
+ P = ParamSpec("P")


  def sum_optional_ints(a: int | None, b: int | None) -> int | None:
@@ -70,6 +77,26 @@ class AnthropicDefaultMaxTokensStrategy(DefaultMaxTokensStrategy):
          return 32000


+ @inherited_decorator
+ def _error_handler(func: Callable[P, Response]) -> Callable[P, Response]:
+     """
+     Decorator to catch errors from anthropic and transform them into the unified APIError
+     """
+     @wraps(func)
+     def wrapper(*args, **kwargs):
+         try:
+             return func(*args, **kwargs)
+         except AnthropicAPIError as e:
+             code = getattr(e, "status_code", None)
+             response = getattr(e, "response", None)
+             status = getattr(response, "reason_phrase", None)
+             response_json = {
+                 "status": status,
+                 "message": e.message,
+             }
+             raise APIError(code, response_json, response)
+     return wrapper
+
  class AnthropicStreamIterator:
      def __init__(self, anthropic_iterator: Stream[RawMessageStreamEvent]):
          self._anthropic_iterator = anthropic_iterator
@@ -162,6 +189,7 @@ class AnthropicLLMClient(BaseLLMClient):
              anthropic_messages.append({"role": role, "content": content})
          return anthropic_messages

+     @_error_handler
      def create(
          self,
          messages: list[Content],
@@ -278,6 +306,7 @@ class AnthropicLLMClient(BaseLLMClient):
          else:
              raise ValueError(f"Unsupported result type: {result_type}")

+     @_error_handler
      def create_stream(
          self,
          messages: list[Content],
@@ -334,6 +363,26 @@ class AnthropicLLMClient(BaseLLMClient):
          return models


+ @inherited_decorator
+ def _error_handler_async(func: Callable[P, Awaitable[Response]]) -> Callable[P, Awaitable[Response]]:
+     """
+     Decorator to catch errors from anthropic and transform them into the unified APIError
+     """
+     @wraps(func)
+     async def wrapper(*args, **kwargs):
+         try:
+             return await func(*args, **kwargs)
+         except AnthropicAPIError as e:
+             code = getattr(e, "status_code", None)
+             response = getattr(e, "response", None)
+             status = getattr(response, "reason_phrase", None)
+             response_json = {
+                 "status": status,
+                 "message": e.message,
+             }
+             raise APIError(code, response_json, response)
+     return wrapper
+
  class AnthropicStreamIteratorAsync:
      def __init__(self, anthropic_iterator: AsyncStream[RawMessageStreamEvent]):
          self._anthropic_iterator = anthropic_iterator
@@ -391,6 +440,7 @@ class AnthropicLLMClientAsync(BaseLLMClientAsync):
      def api_key(self) -> str:
          return self._api_key

+     @_error_handler_async
      async def create(
          self,
          messages: list[Content],
@@ -513,6 +563,7 @@ class AnthropicLLMClientAsync(BaseLLMClientAsync):
          else:
              raise ValueError(f"Unsupported result_type: {result_type}. Supported types are: None, 'json', or a Pydantic model.")

+     @_error_handler_async
      async def create_stream(
          self,
          messages: list[Content],
@@ -1,17 +1,24 @@
  import os
- from typing import AsyncIterator, Iterator, Any
+ from functools import wraps
+ from typing import AsyncIterator, Iterator, Any, Callable, ParamSpec, Awaitable

  import boto3
+ from boto3.exceptions import Boto3Error
  import aioboto3
  from pydantic import BaseModel, ConfigDict
  from botocore.eventstream import EventStream
+ from botocore.exceptions import ClientError, BotoCoreError

  from promptbuilder.llm_client.base_client import BaseLLMClient, BaseLLMClientAsync, ResultType
  from promptbuilder.llm_client.types import Response, Content, Candidate, UsageMetadata, Part, ThinkingConfig, Tool, ToolConfig, FunctionCall, CustomApiKey, Model
  from promptbuilder.llm_client.config import DecoratorConfigs
  from promptbuilder.prompt_builder import PromptBuilder
+ from promptbuilder.llm_client.utils import inherited_decorator
+ from promptbuilder.llm_client.exceptions import APIError


+ P = ParamSpec("P")
+
  class BedrockApiKey(BaseModel, CustomApiKey):
      model_config = ConfigDict(frozen=True)

@@ -20,6 +27,26 @@ class BedrockApiKey(BaseModel, CustomApiKey):
      aws_region: str = os.getenv("AWS_DEFAULT_REGION", "us-east-1")


+ @inherited_decorator
+ def _error_handler(func: Callable[P, Response]) -> Callable[P, Response]:
+     """
+     Decorator to catch errors from the boto libraries and transform them into the unified APIError
+     """
+     @wraps(func)
+     def wrapper(*args, **kwargs):
+         try:
+             return func(*args, **kwargs)
+         except (Boto3Error, BotoCoreError, ClientError) as e:
+             code = None
+             response = None
+             status = None
+             response_json = {
+                 "status": status,
+                 "message": str(e.args),
+             }
+             raise APIError(code, response_json, response)
+     return wrapper
+
  class BedrockStreamIterator:
      def __init__(self, bedrock_iterator: EventStream):
          self._bedrock_iterator = bedrock_iterator
@@ -69,6 +96,7 @@ class BedrockLLMClient(BaseLLMClient):
      def api_key(self) -> BedrockApiKey:
          return self._api_key

+     @_error_handler
      def create(
          self,
          messages: list[Content],
@@ -226,7 +254,8 @@ class BedrockLLMClient(BaseLLMClient):
              ),
              parsed=parsed_pydantic,
          )
-
+
+     @_error_handler
      def create_stream(
          self,
          messages: list[Content],
@@ -278,6 +307,27 @@ class BedrockLLMClient(BaseLLMClient):
              ))
          return models

+
+ @inherited_decorator
+ def _error_handler_async(func: Callable[P, Awaitable[Response]]) -> Callable[P, Awaitable[Response]]:
+     """
+     Decorator to catch errors from the boto libraries and transform them into the unified APIError
+     """
+     @wraps(func)
+     async def wrapper(*args, **kwargs):
+         try:
+             return await func(*args, **kwargs)
+         except (Boto3Error, BotoCoreError, ClientError) as e:
+             code = None
+             response = None
+             status = None
+             response_json = {
+                 "status": status,
+                 "message": str(e.args),
+             }
+             raise APIError(code, response_json, response)
+     return wrapper
+
  class BedrockStreamIteratorAsync:
      def __init__(self, aioboto_session: aioboto3.Session, **bedrock_kwargs):
          self._aioboto_session = aioboto_session
@@ -336,6 +386,7 @@ class BedrockLLMClientAsync(BaseLLMClientAsync):
      def api_key(self) -> BedrockApiKey:
          return self._api_key

+     @_error_handler_async
      async def create(
          self,
          messages: list[Content],
@@ -487,7 +538,8 @@ class BedrockLLMClientAsync(BaseLLMClientAsync):
              ),
              parsed=parsed_pydantic,
          )
-
+
+     @_error_handler_async
      async def create_stream(
          self,
          messages: list[Content],
@@ -0,0 +1,128 @@
+ import json
+ from typing import Any
+
+ import httpx
+ import aiohttp
+
+
+ class APIError(Exception):
+     """General errors raised by the llm clients"""
+     code: int | None = None
+     status: str | None = None
+     message: str | None = None
+     response: httpx.Response | aiohttp.ClientResponse | None = None
+
+     def __init__(self, code: int | None = None, response_json: dict[str, Any] = {}, response: httpx.Response | aiohttp.ClientResponse | None = None):
+         self.response = response
+         self.details = response_json
+         self.message = self._get_message(response_json)
+         self.status = self._get_status(response_json)
+         self.code = code if code else self._get_code(response_json)
+
+         super().__init__(f"{self.code} {self.status}. {self.details}")
+
+     def _get_status(self, response_json: dict[str, Any]) -> Any:
+         return response_json.get(
+             "status", response_json.get("error", {}).get("status", None)
+         )
+
+     def _get_message(self, response_json: dict[str, Any]) -> Any:
+         return response_json.get(
+             "message", response_json.get("error", {}).get("message", None)
+         )
+
+     def _get_code(self, response_json: dict[str, Any]) -> Any:
+         return response_json.get(
+             "code", response_json.get("error", {}).get("code", None)
+         )
+
+     def _to_replay_record(self) -> dict[str, Any]:
+         """Returns a dictionary representation of the error for replay recording."""
+         return {
+             "error": {
+                 "code": self.code,
+                 "message": self.message,
+                 "status": self.status,
+             }
+         }
+
+     @classmethod
+     def raise_for_response(cls, response: httpx.Response):
+         """Raises an error with detailed error message if the response has an error status."""
+         if response.status_code == 200:
+             return
+
+         if isinstance(response, httpx.Response):
+             try:
+                 response.read()
+                 response_json = response.json()
+             except json.decoder.JSONDecodeError:
+                 message = response.text
+                 response_json = {
+                     "message": message,
+                     "status": response.reason_phrase,
+                 }
+         else:
+             response_json = response.body_segments[0].get('error', {})
+
+         status_code = response.status_code
+         if 400 <= status_code < 500:
+             raise ClientError(status_code, response_json, response)
+         elif 500 <= status_code < 600:
+             raise ServerError(status_code, response_json, response)
+         else:
+             raise cls(status_code, response_json, response)
+
+     @classmethod
+     async def raise_for_async_response(cls, response: httpx.Response | aiohttp.ClientResponse):
+         """Raises an error with detailed error message if the response has an error status."""
+         status_code = 0
+         response_json = None
+         if isinstance(response, httpx.Response):
+             if response.status_code == 200:
+                 return
+             try:
+                 await response.aread()
+                 response_json = response.json()
+             except json.decoder.JSONDecodeError:
+                 message = response.text
+                 response_json = {
+                     "message": message,
+                     "status": response.reason_phrase,
+                 }
+             status_code = response.status_code
+         else:
+             try:
+                 if isinstance(response, aiohttp.ClientResponse):
+                     if response.status == 200:
+                         return
+                     try:
+                         response_json = await response.json()
+                     except aiohttp.client_exceptions.ContentTypeError:
+                         message = await response.text()
+                         response_json = {
+                             "message": message,
+                             "status": response.reason,
+                         }
+                     status_code = response.status
+                 else:
+                     response_json = response.body_segments[0].get("error", {})
+             except ImportError:
+                 response_json = response.body_segments[0].get("error", {})
+
+         if 400 <= status_code < 500:
+             raise ClientError(status_code, response_json, response)
+         elif 500 <= status_code < 600:
+             raise ServerError(status_code, response_json, response)
+         else:
+             raise cls(status_code, response_json, response)
+
+
+ class ClientError(APIError):
+     """Client error raised by the llm clients"""
+     pass
+
+
+ class ServerError(APIError):
+     """Server error raised by the llm clients"""
+     pass
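The provider clients below wrap their create and create_stream methods with error handlers that re-raise provider-specific failures as these unified exceptions. An illustrative sketch of how a caller might rely on that, assuming a client obtained via get_client with a hypothetical model identifier and a stand-in messages list; only the exception classes and the create method name come from the hunks in this diff.

from promptbuilder.llm_client import get_client, APIError, ClientError, ServerError

client = get_client("openai:gpt-4o-mini")  # hypothetical model identifier
messages = []  # stand-in for a list[Content] built with the library's types

try:
    response = client.create(messages)
except ClientError as e:
    # 4xx responses are mapped to ClientError by APIError.raise_for_response
    print("client error:", e.code, e.status, e.message)
except ServerError as e:
    # 5xx responses are mapped to ServerError
    print("server error:", e.code, e.status, e.message)
except APIError as e:
    # other provider SDK failures caught by the clients' error handlers become APIError
    print("api error:", e.code, e.details)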
@@ -1,12 +1,42 @@
  import os
- from typing import AsyncIterator, Iterator
+ from functools import wraps
+ from typing import AsyncIterator, Iterator, Callable, ParamSpec, Awaitable

  from pydantic import BaseModel
+ from tenacity import RetryError
  from google.genai import Client, types

  from promptbuilder.llm_client.base_client import BaseLLMClient, BaseLLMClientAsync, ResultType
  from promptbuilder.llm_client.types import Response, Content, Part, ThinkingConfig, Tool, ToolConfig, Model
  from promptbuilder.llm_client.config import DecoratorConfigs
+ from promptbuilder.llm_client.utils import inherited_decorator
+ from promptbuilder.llm_client.exceptions import APIError
+
+
+ P = ParamSpec("P")
+
+
+ @inherited_decorator
+ def _error_handler(func: Callable[P, Response]) -> Callable[P, Response]:
+     """
+     Decorator to catch errors from google.genai and transform them into the unified APIError
+     """
+     @wraps(func)
+     def wrapper(*args, **kwargs):
+         try:
+             return func(*args, **kwargs)
+         except RetryError as retry_error:
+             e = retry_error.last_attempt._exception
+             if e is None:
+                 raise APIError()
+             code = e.code
+             response_json = {
+                 "status": e.status,
+                 "message": e.message,
+             }
+             response = e.response
+             raise APIError(code, response_json, response)
+     return wrapper


  class GoogleLLMClient(BaseLLMClient):
@@ -53,7 +83,8 @@ class GoogleLLMClient(BaseLLMClient):
              )
              new_messages.append(new_message)
          return new_messages
-
+
+     @_error_handler
      def create(
          self,
          messages: list[Content],
@@ -103,7 +134,8 @@ class GoogleLLMClient(BaseLLMClient):
              )
          else:
              raise ValueError(f"Unsupported result_type: {result_type}. Supported types are: None, 'json', or a Pydantic model.")
-
+
+     @_error_handler
      def create_stream(
          self,
          messages: list[Content],
@@ -159,6 +191,28 @@ class GoogleLLMClient(BaseLLMClient):
          return models


+ @inherited_decorator
+ def _error_handler_async(func: Callable[P, Awaitable[Response]]) -> Callable[P, Awaitable[Response]]:
+     """
+     Decorator to catch errors from google.genai and transform them into the unified APIError
+     """
+     @wraps(func)
+     async def wrapper(*args, **kwargs):
+         try:
+             return await func(*args, **kwargs)
+         except RetryError as retry_error:
+             e = retry_error.last_attempt._exception
+             if e is None:
+                 raise APIError()
+             code = e.code
+             response_json = {
+                 "status": e.status,
+                 "message": e.message,
+             }
+             response = e.response
+             raise APIError(code, response_json, response)
+     return wrapper
+
  class GoogleLLMClientAsync(BaseLLMClientAsync):
      PROVIDER: str = "google"

@@ -181,6 +235,7 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
      def api_key(self) -> str:
          return self._api_key

+     @_error_handler_async
      async def create(
          self,
          messages: list[Content],
@@ -222,7 +277,8 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
              )
          else:
              raise ValueError(f"Unsupported result_type: {result_type}. Supported types are: None, 'json', or a Pydantic model.")
-
+
+     @_error_handler_async
      async def create_stream(
          self,
          messages: list[Content],
@@ -1,17 +1,43 @@
  import os
  import json
  import base64
- from typing import AsyncIterator, Iterator
+ from functools import wraps
+ from typing import AsyncIterator, Iterator, Callable, ParamSpec, Awaitable

  from pydantic import BaseModel
- from openai import OpenAI, AsyncOpenAI, Stream, AsyncStream
+ from openai import OpenAI, AsyncOpenAI, Stream, AsyncStream, APIError as OpenAIAPIError
  from openai.types.responses import ResponseStreamEvent

  from promptbuilder.llm_client.base_client import BaseLLMClient, BaseLLMClientAsync, ResultType
  from promptbuilder.llm_client.types import Response, Content, Candidate, UsageMetadata, Part, ThinkingConfig, Tool, ToolConfig, FunctionCall, MessageDict, Model
  from promptbuilder.llm_client.config import DecoratorConfigs
+ from promptbuilder.llm_client.utils import inherited_decorator
+ from promptbuilder.llm_client.exceptions import APIError


+ P = ParamSpec("P")
+
+
+ @inherited_decorator
+ def _error_handler(func: Callable[P, Response]) -> Callable[P, Response]:
+     """
+     Decorator to catch errors from openai and transform them into the unified APIError
+     """
+     @wraps(func)
+     def wrapper(*args, **kwargs):
+         try:
+             return func(*args, **kwargs)
+         except OpenAIAPIError as e:
+             code = getattr(e, "status_code", None) or e.code
+             response = getattr(e, "response", None)
+             status = getattr(response, "reason_phrase", None)
+             response_json = {
+                 "status": status,
+                 "message": e.message,
+             }
+             raise APIError(code, response_json, response)
+     return wrapper
+
  class OpenaiStreamIterator:
      def __init__(self, openai_iterator: Stream[ResponseStreamEvent]):
          self._openai_iterator = openai_iterator
@@ -113,6 +139,7 @@ class OpenaiLLMClient(BaseLLMClient):
              openai_thinking_config["reasoning"] = {"effort": "high"}
          return openai_thinking_config

+     @_error_handler
      def create(
          self,
          messages: list[Content],
@@ -223,7 +250,8 @@ class OpenaiLLMClient(BaseLLMClient):
              )
          else:
              raise ValueError(f"Unsupported result type: {result_type}. Supported types are None, 'json', or a Pydantic model class.")
-
+
+     @_error_handler
      def create_stream(
          self,
          messages: list[Content],
@@ -277,6 +305,26 @@ class OpenaiLLMClient(BaseLLMClient):
          return models


+ @inherited_decorator
+ def _error_handler_async(func: Callable[P, Awaitable[Response]]) -> Callable[P, Awaitable[Response]]:
+     """
+     Decorator to catch errors from openai and transform them into the unified APIError
+     """
+     @wraps(func)
+     async def wrapper(*args, **kwargs):
+         try:
+             return await func(*args, **kwargs)
+         except OpenAIAPIError as e:
+             code = getattr(e, "status_code", None) or e.code
+             response = getattr(e, "response", None)
+             status = getattr(response, "reason_phrase", None)
+             response_json = {
+                 "status": status,
+                 "message": e.message,
+             }
+             raise APIError(code, response_json, response)
+     return wrapper
+
  class OpenaiStreamIteratorAsync:
      def __init__(self, openai_iterator: AsyncStream[ResponseStreamEvent]):
          self._openai_iterator = openai_iterator
@@ -324,6 +372,7 @@ class OpenaiLLMClientAsync(BaseLLMClientAsync):
      def api_key(self) -> str:
          return self._api_key

+     @_error_handler_async
      async def create(
          self,
          messages: list[Content],
@@ -441,7 +490,8 @@ class OpenaiLLMClientAsync(BaseLLMClientAsync):
              )
          else:
              raise ValueError(f"Unsupported result_type: {result_type}. Supported types are: None, 'json', or a Pydantic model.")
-
+
+     @_error_handler_async
      async def create_stream(
          self,
          messages: list[Content],
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: promptbuilder
- Version: 0.4.18
+ Version: 0.4.20
  Summary: Library for building prompts for LLMs
  Home-page: https://github.com/kapulkin/promptbuilder
  Author: Kapulkin Stanislav
@@ -1,24 +1,26 @@
  promptbuilder/__init__.py,sha256=o_NdXl7NppM399-fy5VGfYkSN8iYDAaFAwJNhdkW3bI,56
+ promptbuilder/embeddings.py,sha256=bu-soCNYiHxshc1jejGmI5iJTIdotqEhmvpImSjlFTY,8087
  promptbuilder/prompt_builder.py,sha256=kK6WHr2umYmsanYb2fQVxqEajs_dzGPXRulTo40g36E,12428
  promptbuilder/agent/__init__.py,sha256=qG4Jq4wbmCH5NKLOX6ZMtZ7lFURhJXf464BntR-u5rU,56
  promptbuilder/agent/agent.py,sha256=dVu251C1r9w5LS2P_shsIRH9tFz1Jq93MDv3Uu41_4E,9274
  promptbuilder/agent/context.py,sha256=CVw715vFrhfvddQmRNy4A1U87GsZyIKj9Xu4SCidbc0,1120
  promptbuilder/agent/tool.py,sha256=VDbIHK3_Q62Ei7hwLF7nIgHq-PTMKnv1NSjHpDYkUZE,2651
  promptbuilder/agent/utils.py,sha256=vTkphKw04v_QDIJtoB2JKK0RGY6iI1t_0LbmuStunzI,356
- promptbuilder/llm_client/__init__.py,sha256=2tPVYqwNdwTRdIg4Pde6Nc259FJvy70gjEj1N2oqNrc,458
+ promptbuilder/llm_client/__init__.py,sha256=wJ33cnRtZX_YPsbcGxEu3SEZMOhPX7-fHI59MEPUe7I,517
  promptbuilder/llm_client/aisuite_client.py,sha256=aMqg05zefzck9Lz7pm7jZoKFdzr_ymFYhrAjZtzdHlQ,15561
- promptbuilder/llm_client/anthropic_client.py,sha256=JeTVC26ahuJJT4G_3Bsoc4TqLzVDPuJpJiCRxTALnqA,26146
+ promptbuilder/llm_client/anthropic_client.py,sha256=ypr-KqlQJXHlGzwdSNYy9jH69mUQGUWIEhVkVHrtaro,27946
  promptbuilder/llm_client/base_client.py,sha256=GS-Qb20WtZnljmEUD2ibhTHDet7exoyhQ0_mGNAEKlg,24219
- promptbuilder/llm_client/bedrock_client.py,sha256=W4wFW7Vbv-nsT2ReyhJ4YIPSTXxE_4S83352vJDSmDk,25772
+ promptbuilder/llm_client/bedrock_client.py,sha256=FlUVF4YEzX7JzL4hEFQtNSLSQwPOilnN8unqRgHE4M8,27508
  promptbuilder/llm_client/config.py,sha256=exQEm35wp7lK5SfXNpN5H9VZEb2LVa4pyZ-cxGt1U-U,1124
- promptbuilder/llm_client/google_client.py,sha256=heyeACt_0bVP3p4pCQeWR92MhCsyNk844kWJ_0MVTfg,9830
+ promptbuilder/llm_client/exceptions.py,sha256=t-X7r_a8B1jNu8eEavde1jXu5dz97yV3IG4YHOtgh0Y,4836
+ promptbuilder/llm_client/google_client.py,sha256=5AucfhCM32Sd2c3vr8oPNNwrVSSrZ9EgzIIVlIHx00U,11652
  promptbuilder/llm_client/logfire_decorators.py,sha256=un_QnIekypOEcqTZ5v1y9pwijGnF95xwnwKO5rFSHVY,9667
  promptbuilder/llm_client/main.py,sha256=k4JTyKq2atNyFtI1bjjqXEnGSEugj4xk0AJEvHJiMig,8310
- promptbuilder/llm_client/openai_client.py,sha256=5yvjp-Zzp4JsBC9_ffSb1A9-iMG4Lu2B2et2CdtK9R0,22864
+ promptbuilder/llm_client/openai_client.py,sha256=Mtd5Y5Bt-KgtyYPSXXCt-0ZiXN4_B0Rfya1EPxqNuaQ,24659
  promptbuilder/llm_client/types.py,sha256=2E-aPRb5uAkLFJocmjF1Lh2aQRq9r8a5JRIw-duHfjA,7460
  promptbuilder/llm_client/utils.py,sha256=79lvSppjrrItHB5MIozbp_5Oq7TsOK4Qzt9Ae3XMLFw,7624
- promptbuilder-0.4.18.dist-info/licenses/LICENSE,sha256=fqXmInzgsvEOIaKSBgcrwKyYCGYF0MKErJ0YivtODcc,1096
- promptbuilder-0.4.18.dist-info/METADATA,sha256=bbynjS91gKgHZKKCzw1VgD2FgI54Orn5OLRUqZJsQmA,3738
- promptbuilder-0.4.18.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- promptbuilder-0.4.18.dist-info/top_level.txt,sha256=UBVcYn4UgrPy3O3fmmnPEU_kieuplBMgheetIMei4EI,14
- promptbuilder-0.4.18.dist-info/RECORD,,
+ promptbuilder-0.4.20.dist-info/licenses/LICENSE,sha256=fqXmInzgsvEOIaKSBgcrwKyYCGYF0MKErJ0YivtODcc,1096
+ promptbuilder-0.4.20.dist-info/METADATA,sha256=L4GN9TJv_sCOtJTelH1AC8bINLY66oXoBAh38lhgpKU,3738
+ promptbuilder-0.4.20.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ promptbuilder-0.4.20.dist-info/top_level.txt,sha256=UBVcYn4UgrPy3O3fmmnPEU_kieuplBMgheetIMei4EI,14
+ promptbuilder-0.4.20.dist-info/RECORD,,