promptbuilder 0.4.36__tar.gz → 0.4.38__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. {promptbuilder-0.4.36/promptbuilder.egg-info → promptbuilder-0.4.38}/PKG-INFO +1 -1
  2. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/base_client.py +55 -32
  3. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/logfire_decorators.py +6 -3
  4. {promptbuilder-0.4.36 → promptbuilder-0.4.38/promptbuilder.egg-info}/PKG-INFO +1 -1
  5. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/setup.py +1 -1
  6. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/LICENSE +0 -0
  7. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/MANIFEST.in +0 -0
  8. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/Readme.md +0 -0
  9. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/__init__.py +0 -0
  10. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/agent/__init__.py +0 -0
  11. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/agent/agent.py +0 -0
  12. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/agent/context.py +0 -0
  13. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/agent/tool.py +0 -0
  14. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/agent/utils.py +0 -0
  15. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/embeddings.py +0 -0
  16. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/__init__.py +0 -0
  17. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/aisuite_client.py +0 -0
  18. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/anthropic_client.py +0 -0
  19. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/bedrock_client.py +0 -0
  20. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/config.py +0 -0
  21. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/exceptions.py +0 -0
  22. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/google_client.py +0 -0
  23. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/litellm_client.py +0 -0
  24. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/main.py +0 -0
  25. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/openai_client.py +0 -0
  26. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/types.py +0 -0
  27. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/utils.py +0 -0
  28. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/vertex_client.py +0 -0
  29. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/prompt_builder.py +0 -0
  30. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder.egg-info/SOURCES.txt +0 -0
  31. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder.egg-info/dependency_links.txt +0 -0
  32. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder.egg-info/requires.txt +0 -0
  33. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder.egg-info/top_level.txt +0 -0
  34. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/pyproject.toml +0 -0
  35. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/setup.cfg +0 -0
  36. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/tests/test_llm_client.py +0 -0
  37. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/tests/test_llm_client_async.py +0 -0
  38. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/tests/test_timeout_google.py +0 -0
  39. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/tests/test_timeout_litellm.py +0 -0
  40. {promptbuilder-0.4.36 → promptbuilder-0.4.38}/tests/test_timeout_openai.py +0 -0
{promptbuilder-0.4.36/promptbuilder.egg-info → promptbuilder-0.4.38}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: promptbuilder
-Version: 0.4.36
+Version: 0.4.38
 Summary: Library for building prompts for LLMs
 Home-page: https://github.com/kapulkin/promptbuilder
 Author: Kapulkin Stanislav
{promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/base_client.py
@@ -319,31 +319,44 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
             max_tokens = self.default_max_tokens
 
         stream_messages = []
-
         total_count = 0
-        response = None
-        for response in self._create_stream(
-            messages=messages,
-            thinking_config=thinking_config,
-            system_message=system_message,
-            max_tokens=max_tokens if not autocomplete else None,
-        ):
+        response: Response | None = None
+
+        # Factory to (re)create the underlying provider stream using current accumulated state
+        def _stream_factory():
+            nonlocal response, total_count
+            tries = 3
+            while tries > 0:
+                try:
+                    iter = self._create_stream(
+                        messages=messages + stream_messages,
+                        thinking_config=thinking_config,
+                        system_message=system_message,
+                        max_tokens=max_tokens if not autocomplete else None,
+                    )
+                    for response in iter:
+                        yield response
+                    break
+                except Exception as e:
+                    tries -= 1
+                    if tries == 0:
+                        raise
+                    logger.warning(f"Stream generation error: {e}, retrying...")
+
+        # Use retry to iterate through the stream; on exception previously yielded parts
+        # are already merged into stream_messages so resumed attempts continue generation.
+        for response in _stream_factory():
             BaseLLMClient._append_generated_part(stream_messages, response)
             total_count += BaseLLMClient._response_out_tokens(response)
             yield response
         finish_reason = response.candidates[0].finish_reason.value if response and response.candidates and response.candidates[0].finish_reason else None
         if finish_reason and autocomplete:
             while response.candidates and finish_reason == FinishReason.MAX_TOKENS.value:
-                for response in self._create_stream(
-                    messages=messages,
-                    thinking_config=thinking_config,
-                    system_message=system_message,
-                    max_tokens=max_tokens if not autocomplete else None,
-                ):
+                for response in _stream_factory():
                     BaseLLMClient._append_generated_part(stream_messages, response)
                     total_count += BaseLLMClient._response_out_tokens(response)
                     yield response
-                finish_reason = response.candidates[0].finish_reason.value if response.candidates and response.candidates[0].finish_reason else None
+                finish_reason = response.candidates[0].finish_reason.value if response and response.candidates and response.candidates[0].finish_reason else None
                 if max_tokens is not None and total_count >= max_tokens:
                     break
 
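The hunk above wraps stream creation in a retrying generator: because the outer loop merges each yielded chunk into stream_messages via _append_generated_part, a retry that passes messages + stream_messages back to _create_stream resumes generation from where the failed attempt stopped instead of starting over. Below is a minimal, self-contained sketch of that pattern; make_stream is a hypothetical stand-in for self._create_stream, not promptbuilder's API.

import logging

logger = logging.getLogger(__name__)

def make_stream(prefix: list[str]):
    # Hypothetical provider stream: yields the chunks that come after
    # `prefix`, dropping the connection once before the final chunk.
    chunks = ["Hello", ", ", "world"]
    for i in range(len(prefix), len(chunks)):
        if i == 2 and not make_stream.failed_once:
            make_stream.failed_once = True
            raise RuntimeError("connection dropped")
        yield chunks[i]
make_stream.failed_once = False

def stream_with_retry(tries: int = 3):
    accumulated: list[str] = []  # plays the role of stream_messages
    while tries > 0:
        try:
            # Recreate the stream from the accumulated prefix, so a retry
            # resumes generation instead of starting from scratch.
            for chunk in make_stream(accumulated):
                accumulated.append(chunk)
                yield chunk
            return
        except Exception as e:
            tries -= 1
            if tries == 0:
                raise
            logger.warning(f"Stream generation error: {e}, retrying...")

print("".join(stream_with_retry()))  # -> Hello, world

As in the hunk, chunks already yielded to the caller are never re-yielded, because each retry requests only the continuation.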
{promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/base_client.py
@@ -673,31 +686,41 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
             max_tokens = self.default_max_tokens
 
         total_count = 0
-        stream_iter = await self._create_stream(
-            messages=messages,
-            thinking_config=thinking_config,
-            system_message=system_message,
-            max_tokens=max_tokens if not autocomplete else None,
-        )
         response = None
+
+        async def _stream_factory():
+            nonlocal response, total_count
+            tries = 3
+            while tries > 0:
+                try:
+                    iter = await self._create_stream(
+                        messages=messages,
+                        thinking_config=thinking_config,
+                        system_message=system_message,
+                        max_tokens=max_tokens if not autocomplete else None,
+                    )
+
+                    async for response in iter:
+                        BaseLLMClient._append_generated_part(messages, response)
+                        total_count += BaseLLMClient._response_out_tokens(response)
+                        yield response
+                    break
+                except Exception as e:
+                    tries -= 1
+                    if tries <= 0:
+                        raise
+                    logger.warning(f"Stream generation error: {e}, retrying...")
+
+        stream_iter = _stream_factory()
         async for response in stream_iter:
-            BaseLLMClient._append_generated_part(messages, response)
-            total_count += BaseLLMClient._response_out_tokens(response)
             yield response
-
+
         finish_reason = response.candidates[0].finish_reason.value if response and response.candidates and response.candidates[0].finish_reason else None
         if finish_reason and autocomplete:
             while response.candidates and finish_reason == FinishReason.MAX_TOKENS.value:
-                stream_iter = await self._create_stream(
-                    messages=messages,
-                    thinking_config=thinking_config,
-                    system_message=system_message,
-                    max_tokens=max_tokens if not autocomplete else None,
-                )
+                stream_iter = _stream_factory()
                 async for response in stream_iter:
                     yield response
-                    BaseLLMClient._append_generated_part(messages, response)
-                    total_count += BaseLLMClient._response_out_tokens(response)
                 finish_reason = response.candidates[0].finish_reason.value if response.candidates and response.candidates[0].finish_reason else None
                 if max_tokens is not None and total_count >= max_tokens:
                     break
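Two details distinguish the async variant above: _stream_factory is an async generator, so it is called without await (only the inner self._create_stream(...) is awaited), and accumulation happens inside the factory by mutating messages in place, so retries see prior output through that mutation rather than a messages + stream_messages concatenation. A compact runnable sketch of an async retry-and-resume generator follows; open_stream is a hypothetical coroutine standing in for the provider call, not promptbuilder's API.

import asyncio
import logging

logger = logging.getLogger(__name__)

async def open_stream(start: int):
    # Hypothetical provider call: returns an async iterator over the
    # remaining chunks, failing once partway through the first attempt.
    async def gen():
        for i in range(start, 3):
            if i == 2 and not open_stream.failed_once:
                open_stream.failed_once = True
                raise RuntimeError("connection dropped")
            yield f"chunk-{i}"
    return gen()
open_stream.failed_once = False

async def stream_with_retry(tries: int = 3):
    seen = 0  # plays the role of the parts merged into `messages`
    while tries > 0:
        try:
            stream = await open_stream(seen)  # coroutine -> awaited
            async for chunk in stream:
                seen += 1
                yield chunk
            return
        except Exception as e:
            tries -= 1
            if tries <= 0:
                raise
            logger.warning(f"Stream generation error: {e}, retrying...")

async def main():
    async for chunk in stream_with_retry():  # async generator -> no await
        print(chunk)

asyncio.run(main())  # prints chunk-0, chunk-1, chunk-2 despite one failure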
{promptbuilder-0.4.36 → promptbuilder-0.4.38}/promptbuilder/llm_client/logfire_decorators.py
@@ -46,9 +46,12 @@ def extract_response_data(response: Response) -> dict[str, Any]:
     response_data = {"message": {"role": "assistant"}}
     response_data["message"]["content"] = response.text
     tool_calls = []
-    for part in response.candidates[0].content.parts:
-        if part.function_call is not None:
-            tool_calls.append({"function": {"name": part.function_call.name, "arguments": part.function_call.args}})
+    if response.candidates is not None and len(response.candidates) > 0:
+        content = response.candidates[0].content
+        if content is not None and content.parts is not None:
+            for part in content.parts:
+                if part.function_call is not None:
+                    tool_calls.append({"function": {"name": part.function_call.name, "arguments": part.function_call.args}})
     if len(tool_calls) > 0:
         response_data["message"]["tool_calls"] = tool_calls
     return response_data
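This change makes extract_response_data tolerate responses whose candidates, first candidate content, or parts are None (e.g. blocked or empty responses) instead of raising on index or attribute access. A small illustration of the guarded traversal follows, using simplified stand-in dataclasses rather than promptbuilder's real Response types.

from dataclasses import dataclass
from typing import Any

# Simplified stand-ins for promptbuilder's response types, not the real ones.
@dataclass
class FunctionCall:
    name: str
    args: dict[str, Any]

@dataclass
class Part:
    function_call: FunctionCall | None = None

@dataclass
class Content:
    parts: list[Part] | None = None

@dataclass
class Candidate:
    content: Content | None = None

@dataclass
class Response:
    text: str = ""
    candidates: list[Candidate] | None = None

def extract_tool_calls(response: Response) -> list[dict[str, Any]]:
    tool_calls: list[dict[str, Any]] = []
    # Each level may legitimately be None, so guard candidates, content,
    # and parts before iterating, as the patched function does.
    if response.candidates is not None and len(response.candidates) > 0:
        content = response.candidates[0].content
        if content is not None and content.parts is not None:
            for part in content.parts:
                if part.function_call is not None:
                    tool_calls.append({"function": {"name": part.function_call.name, "arguments": part.function_call.args}})
    return tool_calls

# The unguarded version raised on the first two; now all three are handled.
assert extract_tool_calls(Response()) == []
assert extract_tool_calls(Response(candidates=[Candidate()])) == []
assert extract_tool_calls(
    Response(candidates=[Candidate(Content([Part(FunctionCall("f", {"x": 1}))]))])
) == [{"function": {"name": "f", "arguments": {"x": 1}}}]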
{promptbuilder-0.4.36 → promptbuilder-0.4.38/promptbuilder.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: promptbuilder
-Version: 0.4.36
+Version: 0.4.38
 Summary: Library for building prompts for LLMs
 Home-page: https://github.com/kapulkin/promptbuilder
 Author: Kapulkin Stanislav
{promptbuilder-0.4.36 → promptbuilder-0.4.38}/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="promptbuilder",
-    version="0.4.36",
+    version="0.4.38",
     packages=find_packages(),
     install_requires=[
         "pydantic",