promptbuilder 0.4.22__py3-none-any.whl → 0.4.24__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their registry. It is provided for informational purposes only.
@@ -63,7 +63,7 @@ class AiSuiteLLMClient(BaseLLMClient):
             total_token_count=usage.total_tokens if hasattr(usage, "total_tokens") else usage["total_tokens"],
         )
 
-    def create(
+    def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
@@ -222,7 +222,7 @@ class AiSuiteLLMClientAsync(BaseLLMClientAsync):
             total_token_count=usage.total_tokens if hasattr(usage, "total_tokens") else usage["total_tokens"],
         )
 
-    async def create(
+    async def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
@@ -192,7 +192,7 @@ class AnthropicLLMClient(BaseLLMClient):
         return anthropic_messages
 
     @_error_handler
-    def create(
+    def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
@@ -445,7 +445,7 @@ class AnthropicLLMClientAsync(BaseLLMClientAsync):
         return self._api_key
 
     @_error_handler_async
-    async def create(
+    async def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
@@ -77,7 +77,6 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
     @logfire_decorators.create
     @utils.retry_cls
     @utils.rpm_limit_cls
-    @abstractmethod
     def create(
         self,
         messages: list[Content],
@@ -88,9 +87,57 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool] | None = None,
         tool_config: ToolConfig = ToolConfig(),
+        autocomplete: bool = False
+    ) -> Response:
+        if autocomplete and (result_type == "tools" or isinstance(result_type, type)):
+            raise ValueError("autocompletion is not supported with 'tools' or pydantic model result_type")
+
+        if max_tokens is None:
+            max_tokens = self.default_max_tokens
+
+        response = self._create(
+            messages=messages,
+            result_type=result_type,
+            thinking_config=thinking_config,
+            system_message=system_message,
+            max_tokens=max_tokens,
+            tools=tools,
+            tool_config=tool_config,
+        )
+
+        total_count = BaseLLMClient._response_out_tokens(response)
+
+        while autocomplete and response.candidates and response.candidates[0].finish_reason == FinishReason.MAX_TOKENS:
+            BaseLLMClient._append_generated_part(messages, response)
+
+            response = self._create(
+                messages=messages,
+                result_type=result_type,
+                thinking_config=thinking_config,
+                system_message=system_message,
+                max_tokens=max_tokens,
+                tools=tools,
+                tool_config=tool_config,
+            )
+            total_count += BaseLLMClient._response_out_tokens(response)
+            if max_tokens is not None and total_count >= max_tokens:
+                break
+        return response
+
+    @abstractmethod
+    def _create(
+        self,
+        messages: list[Content],
+        result_type: ResultType = None,
+        *,
+        thinking_config: ThinkingConfig | None = None,
+        system_message: str | None = None,
+        max_tokens: int | None = None,
+        tools: list[Tool] | None = None,
+        tool_config: ToolConfig = ToolConfig(),
     ) -> Response:
         pass
-
+
     @overload
     def create_value(
         self,
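With this hunk the autocomplete loop moves into the base class: `create` becomes a concrete template method that forwards to the provider-specific abstract `_create` and, when `autocomplete=True`, keeps requesting continuations while the model stops at `FinishReason.MAX_TOKENS`, appending each partial answer back onto `messages` and cutting off once the accumulated output tokens reach `max_tokens`. A minimal usage sketch, assuming a sync client such as `GoogleLLMClient` (the constructor arguments shown are hypothetical):

    from promptbuilder.llm_client.google_client import GoogleLLMClient
    from promptbuilder.llm_client.types import Content, Part

    client = GoogleLLMClient(model="gemini-2.0-flash")  # hypothetical construction
    messages = [Content(parts=[Part(text="Write a long essay on diffusion models.")], role="user")]

    # If generation is cut off at MAX_TOKENS, create() appends the partial output
    # to `messages` and calls _create() again until the model finishes on its own
    # or the combined output reaches max_tokens.
    response = client.create(messages, max_tokens=1024, autocomplete=True)
    print(response.text)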
@@ -102,6 +149,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> str: ...
     @overload
     def create_value(
@@ -114,6 +162,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> Json: ...
     @overload
     def create_value(
@@ -126,6 +175,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> PydanticStructure: ...
     @overload
     def create_value(
@@ -138,6 +188,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool],
         tool_choice_mode: Literal["ANY"],
+        autocomplete: bool = False,
     ) -> list[FunctionCall]: ...
 
     def create_value(
@@ -177,21 +228,9 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
             max_tokens=max_tokens,
             tools=tools,
             tool_config=ToolConfig(function_calling_config=FunctionCallingConfig(mode=tool_choice_mode)),
+            autocomplete=autocomplete,
         )
 
-        while autocomplete and response.candidates and response.candidates[0].finish_reason not in [FinishReason.STOP, FinishReason.MAX_TOKENS]:
-            BaseLLMClient._append_generated_part(messages, response)
-
-            response = self.create(
-                messages=messages,
-                result_type=result_type,
-                thinking_config=thinking_config,
-                system_message=system_message,
-                max_tokens=max_tokens,
-                tools=tools,
-                tool_config=ToolConfig(function_calling_config=FunctionCallingConfig(mode=tool_choice_mode)),
-            )
-
         if result_type is None:
             return response.text
         else:
@@ -206,14 +245,14 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
 
         text_parts = [
             part for part in response.candidates[0].content.parts if part.text is not None and not part.thought
-        ] if response.candidates[0].content and response.candidates[0].content.parts else None
+        ] if response.candidates[0].content.parts else None
         if text_parts is not None and len(text_parts) > 0:
             response_text = "".join(part.text for part in text_parts)
             is_thought = False
         else:
             thought_parts = [
                 part for part in response.candidates[0].content.parts if part.text and part.thought
-            ] if response.candidates[0].content and response.candidates[0].content.parts else None
+            ] if response.candidates[0].content.parts else None
             if thought_parts is not None and len(thought_parts) > 0:
                 response_text = "".join(part.text for part in thought_parts)
                 is_thought = True
@@ -231,6 +270,10 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         else:
             messages.append(Content(parts=[Part(text=response_text, thought=is_thought)], role="model"))
 
+    @staticmethod
+    def _response_out_tokens(response: Response):
+        return 0 if not response.usage_metadata else (response.usage_metadata.candidates_token_count or 0) + (response.usage_metadata.thoughts_token_count or 0)
+
     @logfire_decorators.create_stream
     @utils.retry_cls
     @utils.rpm_limit_cls
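The new `_response_out_tokens` helper backs the budget check in the continuation loop: it sums the reported answer tokens and reasoning tokens, treating an absent `usage_metadata` or missing fields as zero. A small illustration with made-up counts:

    from promptbuilder.llm_client.types import UsageMetadata

    usage = UsageMetadata(candidates_token_count=900, thoughts_token_count=120)
    # Mirrors _response_out_tokens: missing counts fall back to 0.
    out_tokens = (usage.candidates_token_count or 0) + (usage.thoughts_token_count or 0)
    assert out_tokens == 1020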
@@ -255,6 +298,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> str: ...
     @overload
     def from_text(
@@ -267,6 +311,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> Json: ...
     @overload
     def from_text(
@@ -279,6 +324,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> PydanticStructure: ...
     @overload
     def from_text(
@@ -291,6 +337,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool],
         tool_choice_mode: Literal["ANY"],
+        autocomplete: bool = False,
     ) -> list[FunctionCall]: ...
 
     def from_text(
@@ -303,6 +350,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool] | None = None,
         tool_choice_mode: Literal["ANY", "NONE"] = "NONE",
+        autocomplete: bool = False,
     ):
         return self.create_value(
             messages=[Content(parts=[Part(text=prompt)], role="user")],
@@ -312,6 +360,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
             max_tokens=max_tokens,
             tools=tools,
             tool_choice_mode=tool_choice_mode,
+            autocomplete=autocomplete,
         )
 
 
@@ -360,7 +409,6 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
     @logfire_decorators.create_async
     @utils.retry_cls_async
     @utils.rpm_limit_cls_async
-    @abstractmethod
     async def create(
         self,
         messages: list[Content],
@@ -371,9 +419,57 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool] | None = None,
         tool_config: ToolConfig = ToolConfig(),
+        autocomplete: bool = False,
+    ) -> Response:
+        if autocomplete and (result_type == "tools" or isinstance(result_type, type)):
+            raise ValueError("autocompletion is not supported with 'tools' or pydantic model result_type")
+
+        if max_tokens is None:
+            max_tokens = self.default_max_tokens
+
+        response = await self._create(
+            messages=messages,
+            result_type=result_type,
+            thinking_config=thinking_config,
+            system_message=system_message,
+            max_tokens=max_tokens,
+            tools=tools,
+            tool_config=tool_config,
+        )
+
+        total_count = BaseLLMClient._response_out_tokens(response)
+
+        while autocomplete and response.candidates and response.candidates[0].finish_reason == FinishReason.MAX_TOKENS:
+            BaseLLMClient._append_generated_part(messages, response)
+
+            response = await self._create(
+                messages=messages,
+                result_type=result_type,
+                thinking_config=thinking_config,
+                system_message=system_message,
+                max_tokens=max_tokens,
+                tools=tools,
+                tool_config=tool_config,
+            )
+            total_count += BaseLLMClient._response_out_tokens(response)
+            if max_tokens is not None and total_count >= max_tokens:
+                break
+        return response
+
+    @abstractmethod
+    async def _create(
+        self,
+        messages: list[Content],
+        result_type: ResultType = None,
+        *,
+        thinking_config: ThinkingConfig | None = None,
+        system_message: str | None = None,
+        max_tokens: int | None = None,
+        tools: list[Tool] | None = None,
+        tool_config: ToolConfig = ToolConfig(),
     ) -> Response:
         pass
-
+
     @overload
     async def create_value(
         self,
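`BaseLLMClientAsync` gets the same template-method split, so the continuation loop awaits `_create` between rounds. A minimal sketch, assuming an async client such as `OpenaiLLMClientAsync` (construction is hypothetical):

    import asyncio

    from promptbuilder.llm_client.openai_client import OpenaiLLMClientAsync
    from promptbuilder.llm_client.types import Content, Part

    async def main() -> None:
        client = OpenaiLLMClientAsync(model="gpt-4o-mini")  # hypothetical construction
        messages = [Content(parts=[Part(text="Summarize War and Peace.")], role="user")]
        # Same semantics as the sync client: while the finish reason is MAX_TOKENS,
        # the partial answer is appended and another _create() round is awaited.
        response = await client.create(messages, max_tokens=512, autocomplete=True)
        print(response.text)

    asyncio.run(main())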
@@ -385,6 +481,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> str: ...
     @overload
     async def create_value(
@@ -397,6 +494,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> Json: ...
     @overload
     async def create_value(
@@ -409,6 +507,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> PydanticStructure: ...
     @overload
     async def create_value(
@@ -421,6 +520,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool],
         tool_choice_mode: Literal["ANY"],
+        autocomplete: bool = False,
     ) -> list[FunctionCall]: ...
 
     async def create_value(
@@ -436,7 +536,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         autocomplete: bool = False,
     ):
         if result_type == "tools":
-            response = await self.create(
+            response = await self._create(
                 messages=messages,
                 result_type=None,
                 thinking_config=thinking_config,
@@ -451,7 +551,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
                     if part.function_call is not None:
                         functions.append(part.function_call)
             return functions
-
+
         response = await self.create(
             messages=messages,
             result_type=result_type,
@@ -460,24 +560,8 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
             max_tokens=max_tokens,
             tools=tools,
             tool_config=ToolConfig(function_calling_config=FunctionCallingConfig(mode=tool_choice_mode)),
+            autocomplete=autocomplete
         )
-
-        if max_tokens is None:
-            max_tokens = self.default_max_tokens
-
-        while autocomplete and response.candidates and response.candidates[0].finish_reason not in [FinishReason.STOP, FinishReason.MAX_TOKENS]:
-            BaseLLMClient._append_generated_part(messages, response)
-
-            response = await self.create(
-                messages=messages,
-                result_type=result_type,
-                thinking_config=thinking_config,
-                system_message=system_message,
-                max_tokens=max_tokens,
-                tools=tools,
-                tool_config=ToolConfig(function_calling_config=FunctionCallingConfig(mode=tool_choice_mode)),
-            )
-
         if result_type is None:
             return response.text
         else:
@@ -509,6 +593,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> str: ...
     @overload
     async def from_text(
@@ -521,6 +606,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> Json: ...
     @overload
     async def from_text(
@@ -533,6 +619,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> PydanticStructure: ...
     @overload
     async def from_text(
@@ -545,6 +632,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool],
         tool_choice_mode: Literal["ANY"],
+        autocomplete: bool = False,
     ) -> list[FunctionCall]: ...
 
     async def from_text(
@@ -557,6 +645,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool] | None = None,
         tool_choice_mode: Literal["ANY", "NONE"] = "NONE",
+        autocomplete: bool = False,
     ):
         return await self.create_value(
             messages=[Content(parts=[Part(text=prompt)], role="user")],
@@ -566,6 +655,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
             max_tokens=max_tokens,
             tools=tools,
             tool_choice_mode=tool_choice_mode,
+            autocomplete=autocomplete,
         )
 
 
@@ -586,7 +676,7 @@ class CachedLLMClient(BaseLLMClient):
         self.llm_client = llm_client
         self.cache_dir = cache_dir
 
-    def create(self, messages: list[Content], **kwargs) -> Response:
+    def _create(self, messages: list[Content], **kwargs) -> Response:
         response, messages_dump, cache_path = CachedLLMClient.create_cached(self.llm_client, self.cache_dir, messages, **kwargs)
         if response is not None:
             return response
@@ -635,8 +725,8 @@ class CachedLLMClientAsync(BaseLLMClientAsync):
         self.provider = llm_client.provider
         self.llm_client = llm_client
         self.cache_dir = cache_dir
-
-    async def create(self, messages: list[Content], **kwargs) -> Response:
+
+    async def _create(self, messages: list[Content], **kwargs) -> Response:
         response, messages_dump, cache_path = CachedLLMClient.create_cached(self.llm_client, self.cache_dir, messages, **kwargs)
         if response is not None:
             return response
@@ -103,7 +103,7 @@ class BedrockLLMClient(BaseLLMClient):
         return self._api_key
 
     @_error_handler
-    def create(
+    def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
@@ -399,7 +399,7 @@ class BedrockLLMClientAsync(BaseLLMClientAsync):
         return self._api_key
 
     @_error_handler_async
-    async def create(
+    async def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
@@ -87,7 +87,7 @@ class GoogleLLMClient(BaseLLMClient):
         return new_messages
 
     @_error_handler
-    def create(
+    def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
@@ -240,7 +240,7 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
         return self._api_key
 
     @_error_handler_async
-    async def create(
+    async def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
@@ -142,7 +142,7 @@ class OpenaiLLMClient(BaseLLMClient):
         return openai_thinking_config
 
     @_error_handler
-    def create(
+    def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
@@ -377,7 +377,7 @@ class OpenaiLLMClientAsync(BaseLLMClientAsync):
         return self._api_key
 
     @_error_handler_async
-    async def create(
+    async def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
@@ -1,9 +1,9 @@
 import logging
 from abc import ABC, abstractmethod
-from typing import Optional, Any, Callable, Literal, TypeVar, Self
+from typing import Optional, Any, Callable, Literal, TypeVar, Self, Protocol, runtime_checkable
 from enum import Enum
 
-from pydantic import BaseModel, model_validator
+from pydantic import BaseModel, model_validator, ConfigDict
 
 
 logger = logging.getLogger(__name__)
@@ -17,6 +17,16 @@ PydanticStructure = TypeVar("PydanticStructure", bound=BaseModel)
 type ResultType = Literal["json"] | type[PydanticStructure] | None
 
 
+@runtime_checkable
+class PartLike(Protocol):
+    """Protocol for Part-like objects that have the same interface as Part."""
+    text: Optional[str]
+    function_call: Optional[Any]  # Using Any to allow different FunctionCall types
+    function_response: Optional[Any]  # Using Any to allow different FunctionResponse types
+    thought: Optional[bool]
+    inline_data: Optional[Any]  # Using Any to allow different Blob types
+
+
 class CustomApiKey(ABC):
     @abstractmethod
     def __hash__(self):
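`PartLike` is a runtime-checkable structural type: any object that exposes the same attributes as `Part` satisfies it without importing or subclassing anything from promptbuilder, which lets provider-native part objects flow through. A sketch with a hypothetical vendor type:

    from dataclasses import dataclass
    from typing import Any, Optional

    from promptbuilder.llm_client.types import PartLike

    @dataclass
    class VendorPart:  # hypothetical third-party class, not part of promptbuilder
        text: Optional[str] = None
        function_call: Optional[Any] = None
        function_response: Optional[Any] = None
        thought: Optional[bool] = None
        inline_data: Optional[Any] = None

    # @runtime_checkable makes isinstance() verify the attributes structurally.
    assert isinstance(VendorPart(text="hello"), PartLike)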
@@ -80,14 +90,16 @@ class Part(BaseModel):
         return cls(inline_data=inline_data)
 
 class Content(BaseModel):
-    parts: Optional[list[Part]] = None
-    role: Optional[Role] = None
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    parts: list[Part | PartLike] | None = None
+    role: Role | None = None
 
     def as_str(self) -> str:
         if self.parts is None:
             return ""
         else:
-            return "\n".join([part.as_str() for part in self.parts])
+            return "\n".join([(part.text or "") for part in self.parts])
 
 
 class FinishReason(Enum):
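Together with `arbitrary_types_allowed`, the widened `parts` annotation means a `Content` can mix pydantic `Part`s with foreign `PartLike` objects (pydantic should fall back to an isinstance check for the runtime-checkable protocol), and `as_str` now reads `.text` directly instead of requiring `Part.as_str`. Continuing the hypothetical `VendorPart` from the sketch above:

    from promptbuilder.llm_client.types import Content, Part

    content = Content(
        parts=[Part(text="first line"), VendorPart(text="second line")],
        role="user",
    )
    assert content.as_str() == "first line\nsecond line"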
@@ -131,6 +143,7 @@ class UsageMetadata(BaseModel):
     cached_content_token_count: Optional[int] = None
     candidates_token_count: Optional[int] = None
     prompt_token_count: Optional[int] = None
+    thoughts_token_count: Optional[int] = None
     total_token_count: Optional[int] = None
 
 class ThinkingConfig(BaseModel):
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: promptbuilder
-Version: 0.4.22
+Version: 0.4.24
 Summary: Library for building prompts for LLMs
 Home-page: https://github.com/kapulkin/promptbuilder
 Author: Kapulkin Stanislav
@@ -7,20 +7,20 @@ promptbuilder/agent/context.py,sha256=CVw715vFrhfvddQmRNy4A1U87GsZyIKj9Xu4SCidbc
 promptbuilder/agent/tool.py,sha256=VDbIHK3_Q62Ei7hwLF7nIgHq-PTMKnv1NSjHpDYkUZE,2651
 promptbuilder/agent/utils.py,sha256=vTkphKw04v_QDIJtoB2JKK0RGY6iI1t_0LbmuStunzI,356
 promptbuilder/llm_client/__init__.py,sha256=wJ33cnRtZX_YPsbcGxEu3SEZMOhPX7-fHI59MEPUe7I,517
-promptbuilder/llm_client/aisuite_client.py,sha256=aMqg05zefzck9Lz7pm7jZoKFdzr_ymFYhrAjZtzdHlQ,15561
-promptbuilder/llm_client/anthropic_client.py,sha256=vWuyFZL_LohOE0UYjB1-zTr4tJZMUcGk8H10gpjzdkk,28074
-promptbuilder/llm_client/base_client.py,sha256=xDDCIAIuLVf6ouItLVzGhP0a3hkr71oTH9jH790zkWE,24556
-promptbuilder/llm_client/bedrock_client.py,sha256=e9vUClbybQb32028oDBW6IbyPYqj1ZSSv9y36ZqUWxM,27941
+promptbuilder/llm_client/aisuite_client.py,sha256=_TfB1xv73kIn4n8ulV3bj8bHOVm4DOD5uIdX-pbYoXY,15563
+promptbuilder/llm_client/anthropic_client.py,sha256=HSIAZVOQoi3hinjhBVGqpt91k0x38xj6EUsPSUrlAA0,28076
+promptbuilder/llm_client/base_client.py,sha256=0U449lHLjl3llyjQlB4hkMGOWl8cE-BtdT7L14AtIco,27835
+promptbuilder/llm_client/bedrock_client.py,sha256=rJMzVV7x_sNJ1nVVqWU1sU-Pq7xlxFxIa_hTD2wtM1Y,27943
 promptbuilder/llm_client/config.py,sha256=exQEm35wp7lK5SfXNpN5H9VZEb2LVa4pyZ-cxGt1U-U,1124
 promptbuilder/llm_client/exceptions.py,sha256=t-X7r_a8B1jNu8eEavde1jXu5dz97yV3IG4YHOtgh0Y,4836
-promptbuilder/llm_client/google_client.py,sha256=y1_CFXBijUiRTyAJsh-8a6CGIwwlZBskO5kWqVWZcPo,11780
+promptbuilder/llm_client/google_client.py,sha256=GzKd_EeJY_GEoZrI6I3ZUAk1PRDBBkdJfEPUKgmlUXM,11782
 promptbuilder/llm_client/logfire_decorators.py,sha256=un_QnIekypOEcqTZ5v1y9pwijGnF95xwnwKO5rFSHVY,9667
 promptbuilder/llm_client/main.py,sha256=5r_MhKVTD4cS90AHR89JJRKiWYBk35Y3JvhvmOxkYHc,8110
-promptbuilder/llm_client/openai_client.py,sha256=GdyTbUPsbACXZYF0BnCRyLVw24_WM1R_MMr6pDpiiV4,24787
-promptbuilder/llm_client/types.py,sha256=2E-aPRb5uAkLFJocmjF1Lh2aQRq9r8a5JRIw-duHfjA,7460
+promptbuilder/llm_client/openai_client.py,sha256=lT0RCiixJBoCtzUbL_0J5NQ5G8KGONzK3dQ73_NgL78,24789
+promptbuilder/llm_client/types.py,sha256=kgbg5FRzvZwu98y1OhAZJDneXBNPnsFZueQCr9HXIY4,8063
 promptbuilder/llm_client/utils.py,sha256=79lvSppjrrItHB5MIozbp_5Oq7TsOK4Qzt9Ae3XMLFw,7624
-promptbuilder-0.4.22.dist-info/licenses/LICENSE,sha256=fqXmInzgsvEOIaKSBgcrwKyYCGYF0MKErJ0YivtODcc,1096
-promptbuilder-0.4.22.dist-info/METADATA,sha256=GtIz1H1kWvHz_1nyCYbkzmkUwsGLYE5jtaKCpt59eHc,3729
-promptbuilder-0.4.22.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-promptbuilder-0.4.22.dist-info/top_level.txt,sha256=UBVcYn4UgrPy3O3fmmnPEU_kieuplBMgheetIMei4EI,14
-promptbuilder-0.4.22.dist-info/RECORD,,
+promptbuilder-0.4.24.dist-info/licenses/LICENSE,sha256=fqXmInzgsvEOIaKSBgcrwKyYCGYF0MKErJ0YivtODcc,1096
+promptbuilder-0.4.24.dist-info/METADATA,sha256=pHr-63JQFZe_KY1u-uDkq41wu4FnBgyxGHZMAuRddfk,3729
+promptbuilder-0.4.24.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+promptbuilder-0.4.24.dist-info/top_level.txt,sha256=UBVcYn4UgrPy3O3fmmnPEU_kieuplBMgheetIMei4EI,14
+promptbuilder-0.4.24.dist-info/RECORD,,