promptbuilder-0.4.31-py3-none-any.whl → promptbuilder-0.4.32-py3-none-any.whl

This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in their public registries.
promptbuilder/llm_client/base_client.py

@@ -98,7 +98,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
             result_type=result_type,
             thinking_config=thinking_config,
             system_message=system_message,
-            max_tokens=max_tokens,
+            max_tokens=max_tokens if not autocomplete else None,
             timeout=timeout,
             tools=tools,
             tool_config=tool_config,
@@ -116,7 +116,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
             result_type=result_type,
             thinking_config=thinking_config,
             system_message=system_message,
-            max_tokens=max_tokens,
+            max_tokens=max_tokens if not autocomplete else None,
             timeout=timeout,
             tools=tools,
             tool_config=tool_config,
@@ -449,7 +449,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
             result_type=result_type,
             thinking_config=thinking_config,
             system_message=system_message,
-            max_tokens=max_tokens,
+            max_tokens=max_tokens if not autocomplete else None,
             timeout=timeout,
             tools=tools,
             tool_config=tool_config,
@@ -467,7 +467,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
             result_type=result_type,
             thinking_config=thinking_config,
             system_message=system_message,
-            max_tokens=max_tokens,
+            max_tokens=max_tokens if not autocomplete else None,
             timeout=timeout,
             tools=tools,
             tool_config=tool_config,
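All four base_client.py hunks make the same one-line change at four call sites (two in BaseLLMClient, two in BaseLLMClientAsync): when a call runs in autocomplete mode, the explicit max_tokens cap is replaced with None, presumably deferring to the provider's default limit. A minimal sketch of the resulting behavior; RequestConfig and build_config are hypothetical stand-ins, not promptbuilder's actual internals:

    # Hypothetical sketch of the new behavior; RequestConfig and build_config
    # are illustrative stand-ins for promptbuilder's internals.
    from dataclasses import dataclass

    @dataclass
    class RequestConfig:
        max_tokens: int | None = None  # None = no explicit cap (provider default)

    def build_config(max_tokens: int | None, autocomplete: bool) -> RequestConfig:
        # Mirrors the diff: `max_tokens=max_tokens if not autocomplete else None`
        return RequestConfig(max_tokens=max_tokens if not autocomplete else None)

    assert build_config(256, autocomplete=False).max_tokens == 256
    assert build_config(256, autocomplete=True).max_tokens is None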
promptbuilder/llm_client/litellm_client.py

@@ -23,7 +23,7 @@ from promptbuilder.llm_client.config import DecoratorConfigs
 from promptbuilder.prompt_builder import PromptBuilder


-class LiteLLMLLMClient(BaseLLMClient):
+class LiteLLMClient(BaseLLMClient):
     provider: str = ""
     user_tag: Role = "user"
     assistant_tag: Role = "model"
@@ -241,7 +241,7 @@ class LiteLLMLLMClient(BaseLLMClient):
             finish_reason_val = first_choice.get("finish_reason")
         else:
             finish_reason_val = getattr(first_choice, "finish_reason", None)
-        mapped_finish_reason = LiteLLMLLMClient._map_finish_reason(finish_reason_val)
+        mapped_finish_reason = LiteLLMClient._map_finish_reason(finish_reason_val)

         content_parts: list[Part | Any] = list(parts)
         return Response(
@@ -293,7 +293,7 @@ class LiteLLMLLMClient(BaseLLMClient):
             finish_reason_val = first_choice.get("finish_reason")
         else:
             finish_reason_val = getattr(first_choice, "finish_reason", None)
-        mapped_finish_reason = LiteLLMLLMClient._map_finish_reason(finish_reason_val)
+        mapped_finish_reason = LiteLLMClient._map_finish_reason(finish_reason_val)

         content_parts2: list[Part | Any] = list(parts)
         return Response(
@@ -311,7 +311,7 @@ class LiteLLMLLMClient(BaseLLMClient):
             raise ValueError(f"Unsupported result_type: {result_type}. Supported types are: None, 'json', or a Pydantic model.")


-class LiteLLMLLMClientAsync(BaseLLMClientAsync):
+class LiteLLMClientAsync(BaseLLMClientAsync):
     provider: str = ""
     user_tag: Role = "user"
     assistant_tag: Role = "model"
@@ -341,11 +341,11 @@ class LiteLLMLLMClientAsync(BaseLLMClientAsync):

     @staticmethod
     def make_function_call(tool_call) -> FunctionCall | None:
-        return LiteLLMLLMClient.make_function_call(tool_call)
+        return LiteLLMClient.make_function_call(tool_call)

     @staticmethod
     def make_usage_metadata(usage) -> UsageMetadata:
-        return LiteLLMLLMClient.make_usage_metadata(usage)
+        return LiteLLMClient.make_usage_metadata(usage)

     async def _create(
         self,
@@ -450,7 +450,7 @@ class LiteLLMLLMClientAsync(BaseLLMClientAsync):
             finish_reason_val = first_choice.get("finish_reason")
         else:
             finish_reason_val = getattr(first_choice, "finish_reason", None)
-        mapped_finish_reason = LiteLLMLLMClient._map_finish_reason(finish_reason_val)
+        mapped_finish_reason = LiteLLMClient._map_finish_reason(finish_reason_val)

         content_parts3: list[Part | Any] = list(parts)
         return Response(
@@ -502,7 +502,7 @@ class LiteLLMLLMClientAsync(BaseLLMClientAsync):
             finish_reason_val = first_choice.get("finish_reason")
         else:
             finish_reason_val = getattr(first_choice, "finish_reason", None)
-        mapped_finish_reason = LiteLLMLLMClient._map_finish_reason(finish_reason_val)
+        mapped_finish_reason = LiteLLMClient._map_finish_reason(finish_reason_val)

         content_parts4: list[Part | Any] = list(parts)
         return Response(
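Every litellm_client.py hunk is part of one rename: LiteLLMLLMClient becomes LiteLLMClient (and LiteLLMLLMClientAsync becomes LiteLLMClientAsync), with the six internal references to the old names updated to match. Since the old names disappear in 0.4.32, downstream imports written against 0.4.31 break. A version-tolerant shim, sketched on the assumption that both versions expose the classes from promptbuilder.llm_client.litellm_client (the module path shown in the RECORD below):

    # Sketch: import shim for the 0.4.32 rename, assuming the classes live in
    # promptbuilder.llm_client.litellm_client in both versions.
    try:
        # New names (promptbuilder >= 0.4.32)
        from promptbuilder.llm_client.litellm_client import (
            LiteLLMClient,
            LiteLLMClientAsync,
        )
    except ImportError:
        # Old names (promptbuilder <= 0.4.31), aliased to the new spelling
        from promptbuilder.llm_client.litellm_client import (
            LiteLLMLLMClient as LiteLLMClient,
            LiteLLMLLMClientAsync as LiteLLMClientAsync,
        )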
promptbuilder-0.4.31.dist-info/METADATA → promptbuilder-0.4.32.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: promptbuilder
-Version: 0.4.31
+Version: 0.4.32
 Summary: Library for building prompts for LLMs
 Home-page: https://github.com/kapulkin/promptbuilder
 Author: Kapulkin Stanislav
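Only the version number changes in METADATA. If downstream code needs to branch on it at runtime (for instance, to pick between the class names above), the standard library's importlib.metadata suffices; a sketch assuming the strict X.Y.Z versioning this package uses:

    # Sketch: gate on the installed promptbuilder version, assuming X.Y.Z.
    from importlib.metadata import version

    ver = tuple(int(part) for part in version("promptbuilder").split("."))
    HAS_RENAMED_CLIENTS = ver >= (0, 4, 32)  # LiteLLMClient exists from 0.4.32 on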
promptbuilder-0.4.31.dist-info/RECORD → promptbuilder-0.4.32.dist-info/RECORD

@@ -9,20 +9,20 @@ promptbuilder/agent/utils.py,sha256=vTkphKw04v_QDIJtoB2JKK0RGY6iI1t_0LbmuStunzI,
 promptbuilder/llm_client/__init__.py,sha256=wJ33cnRtZX_YPsbcGxEu3SEZMOhPX7-fHI59MEPUe7I,517
 promptbuilder/llm_client/aisuite_client.py,sha256=8inY3UoH8o9yEOvRYP6a_8pjGQK0W_f9eV8MmHzpKTU,15641
 promptbuilder/llm_client/anthropic_client.py,sha256=GL5FRmqu2iQqU44joaviEaRpEp4h_USpUiYc8sWu52Y,28326
-promptbuilder/llm_client/base_client.py,sha256=x9s_pyOiOWlSjTnRo162GWcI4pILoCCwomFoLGrn0RU,29922
+promptbuilder/llm_client/base_client.py,sha256=WjzIvR3H8QqnF65RCJock4qwEFb8fPy3W0BnKMNVkg4,30042
 promptbuilder/llm_client/bedrock_client.py,sha256=PGb7KxaK0QwhsZ9frz07h7I2zeyjMMWqIYC7DS6AZp0,28181
 promptbuilder/llm_client/config.py,sha256=exQEm35wp7lK5SfXNpN5H9VZEb2LVa4pyZ-cxGt1U-U,1124
 promptbuilder/llm_client/exceptions.py,sha256=t-X7r_a8B1jNu8eEavde1jXu5dz97yV3IG4YHOtgh0Y,4836
 promptbuilder/llm_client/google_client.py,sha256=ZjJjDUQZH6zAIRoi4xUx3IDEm8jRkVWGyehy5P_Ba_M,12170
-promptbuilder/llm_client/litellm_client.py,sha256=XoYZmeU8XuROhvzVqbdjaWPktOSVKjehIAZgC1C6Lgo,25585
+promptbuilder/llm_client/litellm_client.py,sha256=NxVJORvQy5fSoqzPOagBliAhg2fjnFNnV5ZF7qckbVI,25561
 promptbuilder/llm_client/logfire_decorators.py,sha256=un_QnIekypOEcqTZ5v1y9pwijGnF95xwnwKO5rFSHVY,9667
 promptbuilder/llm_client/main.py,sha256=2Q7J5FwivX2YwvptzoSEtCfvfcI9p5HC55D3mMb2se4,8243
 promptbuilder/llm_client/openai_client.py,sha256=QMXX7VPYWFo1VvX8bWF6jpi95ZIOk_MMBpz-14GrT-k,25274
 promptbuilder/llm_client/types.py,sha256=kgbg5FRzvZwu98y1OhAZJDneXBNPnsFZueQCr9HXIY4,8063
 promptbuilder/llm_client/utils.py,sha256=79lvSppjrrItHB5MIozbp_5Oq7TsOK4Qzt9Ae3XMLFw,7624
 promptbuilder/llm_client/vertex_client.py,sha256=aewidTryIpFMlTRFmDqOG7O-NCbvTP5wW6I3-3vQShE,15002
-promptbuilder-0.4.31.dist-info/licenses/LICENSE,sha256=fqXmInzgsvEOIaKSBgcrwKyYCGYF0MKErJ0YivtODcc,1096
-promptbuilder-0.4.31.dist-info/METADATA,sha256=bcAusvEhtctjGM_TGcZb6cpjbmD3BdSj6ajd6gawga0,3799
-promptbuilder-0.4.31.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-promptbuilder-0.4.31.dist-info/top_level.txt,sha256=UBVcYn4UgrPy3O3fmmnPEU_kieuplBMgheetIMei4EI,14
-promptbuilder-0.4.31.dist-info/RECORD,,
+promptbuilder-0.4.32.dist-info/licenses/LICENSE,sha256=fqXmInzgsvEOIaKSBgcrwKyYCGYF0MKErJ0YivtODcc,1096
+promptbuilder-0.4.32.dist-info/METADATA,sha256=8BdCEIoYzptCDbU4SlpgtfIGIkdKob2A6ksOpSXWdVw,3799
+promptbuilder-0.4.32.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+promptbuilder-0.4.32.dist-info/top_level.txt,sha256=UBVcYn4UgrPy3O3fmmnPEU_kieuplBMgheetIMei4EI,14
+promptbuilder-0.4.32.dist-info/RECORD,,
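The RECORD diff confirms the wheel-level story: only base_client.py and litellm_client.py changed, plus the version-renamed dist-info files. The size deltas match the hunks above: base_client.py grows by 120 bytes (four insertions of the 30-byte ` if not autocomplete else None`), and litellm_client.py shrinks by 24 bytes (eight class-name occurrences each losing the three characters `LLM`). Each RECORD entry has the form path,sha256=<digest>,<size>, where the digest is the unpadded urlsafe base64 of the file's SHA-256 per the wheel spec (PEP 427). A sketch for verifying an entry against an unpacked wheel:

    # Sketch: recompute a RECORD entry's digest and size for a file extracted
    # from the wheel. Paths are relative to the unpacked wheel root.
    import base64
    import hashlib
    from pathlib import Path

    def record_entry(path: str) -> tuple[str, int]:
        data = Path(path).read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
        return digest.decode("ascii"), len(data)

    # For the 0.4.32 wheel this should yield
    # ("WjzIvR3H8QqnF65RCJock4qwEFb8fPy3W0BnKMNVkg4", 30042), assuming the
    # file is extracted from the wheel unchanged.
    print(record_entry("promptbuilder/llm_client/base_client.py"))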