promptbuilder 0.4.35__tar.gz → 0.4.36__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. {promptbuilder-0.4.35/promptbuilder.egg-info → promptbuilder-0.4.36}/PKG-INFO +1 -1
  2. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/base_client.py +37 -36
  3. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/logfire_decorators.py +18 -22
  4. {promptbuilder-0.4.35 → promptbuilder-0.4.36/promptbuilder.egg-info}/PKG-INFO +1 -1
  5. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/setup.py +1 -1
  6. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/LICENSE +0 -0
  7. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/MANIFEST.in +0 -0
  8. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/Readme.md +0 -0
  9. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/__init__.py +0 -0
  10. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/agent/__init__.py +0 -0
  11. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/agent/agent.py +0 -0
  12. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/agent/context.py +0 -0
  13. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/agent/tool.py +0 -0
  14. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/agent/utils.py +0 -0
  15. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/embeddings.py +0 -0
  16. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/__init__.py +0 -0
  17. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/aisuite_client.py +0 -0
  18. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/anthropic_client.py +0 -0
  19. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/bedrock_client.py +0 -0
  20. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/config.py +0 -0
  21. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/exceptions.py +0 -0
  22. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/google_client.py +0 -0
  23. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/litellm_client.py +0 -0
  24. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/main.py +0 -0
  25. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/openai_client.py +0 -0
  26. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/types.py +0 -0
  27. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/utils.py +0 -0
  28. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/llm_client/vertex_client.py +0 -0
  29. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder/prompt_builder.py +0 -0
  30. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder.egg-info/SOURCES.txt +0 -0
  31. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder.egg-info/dependency_links.txt +0 -0
  32. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder.egg-info/requires.txt +0 -0
  33. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/promptbuilder.egg-info/top_level.txt +0 -0
  34. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/pyproject.toml +0 -0
  35. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/setup.cfg +0 -0
  36. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/tests/test_llm_client.py +0 -0
  37. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/tests/test_llm_client_async.py +0 -0
  38. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/tests/test_timeout_google.py +0 -0
  39. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/tests/test_timeout_litellm.py +0 -0
  40. {promptbuilder-0.4.35 → promptbuilder-0.4.36}/tests/test_timeout_openai.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: promptbuilder
-Version: 0.4.35
+Version: 0.4.36
 Summary: Library for building prompts for LLMs
 Home-page: https://github.com/kapulkin/promptbuilder
 Author: Kapulkin Stanislav
@@ -321,32 +321,32 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         stream_messages = []
 
         total_count = 0
+        response = None
         for response in self._create_stream(
             messages=messages,
             thinking_config=thinking_config,
             system_message=system_message,
             max_tokens=max_tokens if not autocomplete else None,
         ):
-            yield response
             BaseLLMClient._append_generated_part(stream_messages, response)
-            finish_reason = response.candidates[0].finish_reason.value if response.candidates and response.candidates[0].finish_reason else None
             total_count += BaseLLMClient._response_out_tokens(response)
-            if finish_reason:
-                if autocomplete:
-                    while response.candidates and finish_reason == FinishReason.MAX_TOKENS.value:
-                        for response in self._create_stream(
-                            messages=messages,
-                            thinking_config=thinking_config,
-                            system_message=system_message,
-                            max_tokens=max_tokens if not autocomplete else None,
-                        ):
-                            yield response
-                            BaseLLMClient._append_generated_part(stream_messages, response)
-                            finish_reason = response.candidates[0].finish_reason.value if response.candidates and response.candidates[0].finish_reason else None
-                            total_count += BaseLLMClient._response_out_tokens(response)
-                        if max_tokens is not None and total_count >= max_tokens:
-                            break
-
+            yield response
+        finish_reason = response.candidates[0].finish_reason.value if response and response.candidates and response.candidates[0].finish_reason else None
+        if finish_reason and autocomplete:
+            while response.candidates and finish_reason == FinishReason.MAX_TOKENS.value:
+                for response in self._create_stream(
+                    messages=messages,
+                    thinking_config=thinking_config,
+                    system_message=system_message,
+                    max_tokens=max_tokens if not autocomplete else None,
+                ):
+                    BaseLLMClient._append_generated_part(stream_messages, response)
+                    total_count += BaseLLMClient._response_out_tokens(response)
+                    yield response
+                    finish_reason = response.candidates[0].finish_reason.value if response.candidates and response.candidates[0].finish_reason else None
+                if max_tokens is not None and total_count >= max_tokens:
+                    break
+
     @overload
     def from_text(
         self,
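Summary of the sync change above: chunk bookkeeping (`_append_generated_part`, token counting) now runs before the `yield`, the finish-reason check and the autocomplete continuation loop move out of the per-chunk body so they run once per stream, and `response = None` guards against an empty stream leaving the loop variable unbound. A minimal, self-contained sketch of the same continuation idea follows; `Chunk` and `request_stream` are hypothetical stand-ins for the client internals, not the promptbuilder API.

```python
# Sketch of the MAX_TOKENS continuation pattern, assuming hypothetical helpers.
from dataclasses import dataclass
from typing import Iterator, Optional


@dataclass
class Chunk:
    text: str
    finish_reason: Optional[str]  # e.g. "MAX_TOKENS" or "STOP"


def request_stream(prompt: str) -> Iterator[Chunk]:
    # Placeholder for a real streaming call.
    yield Chunk(text="partial text", finish_reason="STOP")


def stream_with_autocomplete(prompt: str) -> Iterator[Chunk]:
    chunk = None  # guard: an empty stream must not leave `chunk` unbound
    for chunk in request_stream(prompt):
        yield chunk
    finish_reason = chunk.finish_reason if chunk else None
    # Re-request while the model stopped only because of the output-token limit.
    # The real client also accumulates an output-token count and breaks once the
    # caller's max_tokens budget is reached.
    while finish_reason == "MAX_TOKENS":
        for chunk in request_stream(prompt):
            yield chunk
            finish_reason = chunk.finish_reason
```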
@@ -679,27 +679,28 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
             system_message=system_message,
             max_tokens=max_tokens if not autocomplete else None,
         )
+        response = None
         async for response in stream_iter:
-            yield response
             BaseLLMClient._append_generated_part(messages, response)
-            finish_reason = response.candidates[0].finish_reason.value if response.candidates and response.candidates[0].finish_reason else None
             total_count += BaseLLMClient._response_out_tokens(response)
-            if finish_reason:
-                if autocomplete:
-                    while response.candidates and finish_reason == FinishReason.MAX_TOKENS.value:
-                        stream_iter = await self._create_stream(
-                            messages=messages,
-                            thinking_config=thinking_config,
-                            system_message=system_message,
-                            max_tokens=max_tokens if not autocomplete else None,
-                        )
-                        async for response in stream_iter:
-                            yield response
-                            BaseLLMClient._append_generated_part(messages, response)
-                            finish_reason = response.candidates[0].finish_reason.value if response.candidates and response.candidates[0].finish_reason else None
-                            total_count += BaseLLMClient._response_out_tokens(response)
-                        if max_tokens is not None and total_count >= max_tokens:
-                            break
+            yield response
+
+        finish_reason = response.candidates[0].finish_reason.value if response and response.candidates and response.candidates[0].finish_reason else None
+        if finish_reason and autocomplete:
+            while response.candidates and finish_reason == FinishReason.MAX_TOKENS.value:
+                stream_iter = await self._create_stream(
+                    messages=messages,
+                    thinking_config=thinking_config,
+                    system_message=system_message,
+                    max_tokens=max_tokens if not autocomplete else None,
+                )
+                async for response in stream_iter:
+                    yield response
+                    BaseLLMClient._append_generated_part(messages, response)
+                    total_count += BaseLLMClient._response_out_tokens(response)
+                    finish_reason = response.candidates[0].finish_reason.value if response.candidates and response.candidates[0].finish_reason else None
+                if max_tokens is not None and total_count >= max_tokens:
+                    break
 
     @overload
     async def from_text(
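The async client gets the same restructuring, and the `response = None` initialization matters for the same reason: if `_create_stream` yields nothing, evaluating `finish_reason` after the loop would otherwise raise `NameError`. A small runnable illustration of that guard; the `empty_stream` generator is a made-up stand-in, not part of the library.

```python
# Why initializing the loop variable matters after an (async) for loop.
import asyncio


async def empty_stream():
    # An async generator that yields nothing, standing in for a stream
    # that produced no chunks.
    return
    yield  # makes this function an async generator


async def without_guard():
    async for item in empty_stream():
        pass
    return item  # NameError: `item` was never bound


async def with_guard():
    item = None
    async for item in empty_stream():
        pass
    return item  # None when the stream was empty


print(asyncio.run(with_guard()))  # prints: None
```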
@@ -54,6 +54,21 @@ def extract_response_data(response: Response) -> dict[str, Any]:
     return response_data
 
 
+def record(span: logfire.LogfireSpan, duration: float, response: Response):
+    span.set_attribute("duration", duration)
+
+    span.set_attribute("response_data", extract_response_data(response))
+    span.set_attribute("candidates", response.candidates)
+    span.set_attribute("parsed", response.parsed)
+    span.set_attribute("response_text", response.text)
+    if response.usage_metadata is not None:
+        span.set_attribute("usage_metadata.cached_content_token_count", response.usage_metadata.cached_content_token_count)
+        span.set_attribute("usage_metadata.candidates_token_count", response.usage_metadata.candidates_token_count)
+        span.set_attribute("usage_metadata.thoughts_token_count", response.usage_metadata.thoughts_token_count)
+        span.set_attribute("usage_metadata.prompt_token_count", response.usage_metadata.prompt_token_count)
+        span.set_attribute("usage_metadata.total_token_count", response.usage_metadata.total_token_count)
+
+
 @inherited_decorator
 def create(class_method: Callable[P, Response]) -> Callable[P, Response]:
     """
@@ -69,17 +84,7 @@ def create(class_method: Callable[P, Response]) -> Callable[P, Response]:
         with logfire_llm.span(f"Create with {span_data["full_model_name"]}", **span_data) as span:
             start_time = time.time()
             response = class_method(self, *args, **kwargs)
-            span.set_attribute("duration", time.time() - start_time)
-
-            span.set_attribute("response_data", extract_response_data(response))
-            span.set_attribute("candidates", response.candidates)
-            span.set_attribute("parsed", response.parsed)
-            span.set_attribute("response_text", response.text)
-            if response.usage_metadata is not None:
-                span.set_attribute("usage_metadata.cached_content_token_count", response.usage_metadata.cached_content_token_count)
-                span.set_attribute("usage_metadata.candidates_token_count", response.usage_metadata.candidates_token_count)
-                span.set_attribute("usage_metadata.prompt_token_count", response.usage_metadata.prompt_token_count)
-                span.set_attribute("usage_metadata.total_token_count", response.usage_metadata.total_token_count)
+            record(span, time.time() - start_time, response)
 
             return response
 
@@ -101,17 +106,7 @@ def create_async(class_method: Callable[P, Awaitable[Response]]) -> Callable[P,
         with logfire_llm.span(f"Async create with {span_data["full_model_name"]}", **span_data) as span:
             start_time = time.time()
             response = await class_method(self, *args, **kwargs)
-            span.set_attribute("duration", time.time() - start_time)
-
-            span.set_attribute("response_data", extract_response_data(response))
-            span.set_attribute("candidates", response.candidates)
-            span.set_attribute("parsed", response.parsed)
-            span.set_attribute("response_text", response.text)
-            if response.usage_metadata is not None:
-                span.set_attribute("usage_metadata.cached_content_token_count", response.usage_metadata.cached_content_token_count)
-                span.set_attribute("usage_metadata.candidates_token_count", response.usage_metadata.candidates_token_count)
-                span.set_attribute("usage_metadata.prompt_token_count", response.usage_metadata.prompt_token_count)
-                span.set_attribute("usage_metadata.total_token_count", response.usage_metadata.total_token_count)
+            record(span, time.time() - start_time, response)
 
             return response
 
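The two hunks above, together with the new `record` helper, are a plain de-duplication: the identical block of `span.set_attribute(...)` calls in `create` and `create_async` is replaced by one shared call, which also picks up the new `usage_metadata.thoughts_token_count` field. A self-contained sketch of the pattern follows; `FakeSpan` and the `timed_call` wrappers are illustrative only, not logfire or promptbuilder APIs.

```python
# One shared recording helper used by both the sync and the async wrapper,
# instead of repeating the attribute-setting block in each.
import time


class FakeSpan:
    """Illustrative stand-in for a tracing span with set_attribute()."""

    def __init__(self) -> None:
        self.attributes: dict[str, object] = {}

    def set_attribute(self, key: str, value: object) -> None:
        self.attributes[key] = value


def record(span: FakeSpan, duration: float, response: dict) -> None:
    span.set_attribute("duration", duration)
    for key, value in response.get("usage", {}).items():
        span.set_attribute(f"usage_metadata.{key}", value)


def timed_call(fn, *args):
    span = FakeSpan()
    start = time.time()
    response = fn(*args)
    record(span, time.time() - start, response)  # shared helper
    return response, span


async def timed_call_async(fn, *args):
    span = FakeSpan()
    start = time.time()
    response = await fn(*args)
    record(span, time.time() - start, response)  # same helper on the async path
    return response, span
```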
@@ -150,6 +145,7 @@ def record_streaming(span: logfire.LogfireSpan):
         span.set_attribute("response_text", stream_state.get_response_data()["message"]["content"])
         span.set_attribute("usage_metadata.cached_content_token_count", stream_state.last_usage_data.cached_content_token_count)
         span.set_attribute("usage_metadata.candidates_token_count", stream_state.last_usage_data.candidates_token_count)
+        span.set_attribute("usage_metadata.thoughts_token_count", stream_state.last_usage_data.thoughts_token_count)
         span.set_attribute("usage_metadata.prompt_token_count", stream_state.last_usage_data.prompt_token_count)
         span.set_attribute("usage_metadata.total_token_count", stream_state.last_usage_data.total_token_count)
 
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: promptbuilder
-Version: 0.4.35
+Version: 0.4.36
 Summary: Library for building prompts for LLMs
 Home-page: https://github.com/kapulkin/promptbuilder
 Author: Kapulkin Stanislav
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="promptbuilder",
-    version="0.4.35",
+    version="0.4.36",
     packages=find_packages(),
     install_requires=[
         "pydantic",