promptbuilder 0.4.39.tar.gz → 0.4.41.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. {promptbuilder-0.4.39/promptbuilder.egg-info → promptbuilder-0.4.41}/PKG-INFO +1 -1
  2. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/base_client.py +24 -11
  3. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/google_client.py +7 -1
  4. {promptbuilder-0.4.39 → promptbuilder-0.4.41/promptbuilder.egg-info}/PKG-INFO +1 -1
  5. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/setup.py +1 -1
  6. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/LICENSE +0 -0
  7. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/MANIFEST.in +0 -0
  8. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/Readme.md +0 -0
  9. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/__init__.py +0 -0
  10. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/agent/__init__.py +0 -0
  11. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/agent/agent.py +0 -0
  12. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/agent/context.py +0 -0
  13. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/agent/tool.py +0 -0
  14. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/agent/utils.py +0 -0
  15. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/embeddings.py +0 -0
  16. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/__init__.py +0 -0
  17. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/aisuite_client.py +0 -0
  18. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/anthropic_client.py +0 -0
  19. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/bedrock_client.py +0 -0
  20. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/config.py +0 -0
  21. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/exceptions.py +0 -0
  22. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/litellm_client.py +0 -0
  23. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/logfire_decorators.py +0 -0
  24. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/main.py +0 -0
  25. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/openai_client.py +0 -0
  26. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/types.py +0 -0
  27. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/utils.py +0 -0
  28. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/prompt_builder.py +0 -0
  29. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder.egg-info/SOURCES.txt +0 -0
  30. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder.egg-info/dependency_links.txt +0 -0
  31. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder.egg-info/requires.txt +0 -0
  32. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder.egg-info/top_level.txt +0 -0
  33. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/pyproject.toml +0 -0
  34. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/setup.cfg +0 -0
  35. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/tests/test_llm_client.py +0 -0
  36. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/tests/test_llm_client_async.py +0 -0
  37. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/tests/test_timeout_google.py +0 -0
  38. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/tests/test_timeout_litellm.py +0 -0
  39. {promptbuilder-0.4.39 → promptbuilder-0.4.41}/tests/test_timeout_openai.py +0 -0
{promptbuilder-0.4.39/promptbuilder.egg-info → promptbuilder-0.4.41}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: promptbuilder
- Version: 0.4.39
+ Version: 0.4.41
  Summary: Library for building prompts for LLMs
  Home-page: https://github.com/kapulkin/promptbuilder
  Author: Kapulkin Stanislav

{promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/base_client.py
@@ -58,7 +58,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
  return self.provider + ":" + self.model

  @staticmethod
- def as_json(text: str) -> Json:
+ def as_json(text: str, raise_on_error: bool = True) -> Json:
  # Remove markdown code block formatting if present
  text = text.strip()
@@ -72,7 +72,9 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
  try:
  return json.loads(text, strict=False)
  except json.JSONDecodeError as e:
- raise ValueError(f"Failed to parse LLM response as JSON:\n{text}")
+ if raise_on_error:
+ raise ValueError(f"Failed to parse LLM response as JSON:\n{text}")
+ return None

  def create(
  self,
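
The as_json change above turns a hard failure into an opt-in soft failure: with raise_on_error=False the method returns None instead of raising ValueError when the text is not valid JSON. A minimal usage sketch (the signature comes from this diff; the import path is inferred from the file list and may differ):

    # as_json is a @staticmethod per the diff, so no client instance is needed.
    from promptbuilder.llm_client.base_client import BaseLLMClient

    BaseLLMClient.as_json('{"answer": 42}')                  # -> {'answer': 42}
    BaseLLMClient.as_json("not json", raise_on_error=False)  # -> None
    BaseLLMClient.as_json("not json")                        # raises ValueError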
@@ -109,7 +111,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
  finish_reason = response.candidates[0].finish_reason.value if response.candidates and response.candidates[0].finish_reason else None
  if autocomplete:
  while response.candidates and finish_reason == FinishReason.MAX_TOKENS.value:
- BaseLLMClient._append_generated_part(messages, response)
+ BaseLLMClient._append_generated_part(messages, response, result_type)

  response = self._create(
  messages=messages,
@@ -126,7 +128,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
  if max_tokens is not None and total_count >= max_tokens:
  break
  if response.candidates and response.candidates[0].content:
- appended_message = BaseLLMClient._append_generated_part(messages, response)
+ appended_message = BaseLLMClient._append_generated_part(messages, response, result_type)
  if appended_message is not None:
  response.candidates[0].content = appended_message
  return response
@@ -219,6 +221,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
  tools: list[Tool] | None = None,
  tool_choice_mode: Literal["ANY", "NONE"] = "NONE",
  autocomplete: bool = False,
+ raise_on_json_error: bool = True,
  ):
  if result_type == "tools":
  response = self.create(
@@ -254,7 +257,10 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
  return response.text
  else:
  if result_type == "json" and response.parsed is None:
- response.parsed = BaseLLMClient.as_json(response.text)
+ text = response.text
+ response.parsed = BaseLLMClient.as_json(text, raise_on_json_error)
+ if response.parsed is None:
+ return text
  return response.parsed

  @staticmethod
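
Together with the new raise_on_json_error parameter above, this gives callers a non-raising path for result_type == "json": when parsing fails and the flag is False, as_json returns None and the raw response text is returned instead of a parsed object. A standalone sketch of that fallback (parse_or_passthrough is a hypothetical helper for illustration; only as_json and the flag semantics come from the diff):

    from promptbuilder.llm_client.base_client import BaseLLMClient

    def parse_or_passthrough(text: str, raise_on_json_error: bool = True):
        # Mirrors the diffed logic: try to parse; with the flag off, a failed
        # parse yields None and the raw text is passed through unchanged.
        parsed = BaseLLMClient.as_json(text, raise_on_json_error)
        return text if parsed is None else parsed

    parse_or_passthrough('{"ok": true}')                                # {'ok': True}
    parse_or_passthrough("Sorry, no JSON.", raise_on_json_error=False)  # raw string back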
@@ -280,7 +286,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
  return None, None

  @staticmethod
- def _append_to_message(message: Content, text: str, is_thought: bool):
+ def _append_to_message(message: Content, text: str, is_thought: bool | None):
  if message.parts and message.parts[-1].text is not None and message.parts[-1].thought == is_thought:
  message.parts[-1].text += text
  else:
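
The is_thought parameter is widened to bool | None because the thought flag can legitimately be unset; the merge condition compares the flags directly, so consecutive text with the same flag (including None) is folded into the last part, and anything else starts a new part. An illustrative sketch (Content and Part constructors as used elsewhere in this diff; the types import path and the else-branch behavior are assumptions):

    from promptbuilder.llm_client.base_client import BaseLLMClient
    from promptbuilder.llm_client.types import Content, Part  # assumed path

    msg = Content(parts=[Part(text="Hello", thought=None)], role="model")
    BaseLLMClient._append_to_message(msg, " world", None)  # same flag: merged into one part
    BaseLLMClient._append_to_message(msg, "step 1", True)  # different flag: new part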
@@ -289,14 +295,17 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
  message.parts.append(Part(text=text, thought=is_thought))

  @staticmethod
- def _append_generated_part(messages: list[Content], response: Response) -> Content | None:
+ def _append_generated_part(messages: list[Content], response: Response, result_type: ResultType = None) -> Content | None:
  response_text, is_thought = BaseLLMClient._responce_to_text(response)
  if response_text is None:
  return None

  if len(messages) > 0 and messages[-1].role == "model":
  message_to_append = messages[-1]
- BaseLLMClient._append_to_message(message_to_append, response_text, is_thought)
+ if result_type is None or result_type == "str":
+ BaseLLMClient._append_to_message(message_to_append, response_text, is_thought)
+ else: # json, pydantic model
+ message_to_append.parts = [Part(text=response_text, thought=is_thought)]
  else:
  messages.append(Content(parts=[Part(text=response_text, thought=is_thought)], role="model"))
  return messages[-1]
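
This hunk carries the core behavioral change of the release: during autocomplete continuation, plain-text results keep the old concatenating behavior, while json and pydantic results replace the trailing model message's parts with the latest response text. A hypothetical before/after illustration of the two paths (types import path assumed; both branches shown on one object purely for contrast):

    from promptbuilder.llm_client.types import Content, Part  # assumed path

    # Trailing model message holding a generation cut off at MAX_TOKENS:
    msg = Content(parts=[Part(text='{"items": [1, 2', thought=None)], role="model")

    # result_type None or "str": the continuation text is concatenated in place.
    msg.parts[-1].text += ', 3]}'

    # result_type "json" or a pydantic model: the parts are replaced wholesale,
    # so only the newest response text feeds the next _create call.
    msg.parts = [Part(text='{"items": [1, 2, 3]}', thought=None)]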
@@ -527,7 +536,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
  finish_reason = response.candidates[0].finish_reason.value if response.candidates and response.candidates[0].finish_reason else None
  if autocomplete:
  while response.candidates and finish_reason == FinishReason.MAX_TOKENS.value:
- BaseLLMClient._append_generated_part(messages, response)
+ BaseLLMClient._append_generated_part(messages, response, result_type)

  response = await self._create(
  messages=messages,
@@ -544,7 +553,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
  if max_tokens is not None and total_count >= max_tokens:
  break
  if response.candidates and response.candidates[0].content:
- appended_message = BaseLLMClient._append_generated_part(messages, response)
+ appended_message = BaseLLMClient._append_generated_part(messages, response, result_type)
  if appended_message is not None:
  response.candidates[0].content = appended_message
  return response
@@ -637,6 +646,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
  tools: list[Tool] | None = None,
  tool_choice_mode: Literal["ANY", "NONE"] = "NONE",
  autocomplete: bool = False,
+ raise_on_json_error: bool = True,
  ):
  if result_type == "tools":
  response = await self._create(
@@ -671,7 +681,10 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
  return response.text
  else:
  if result_type == "json" and response.parsed is None:
- response.parsed = BaseLLMClient.as_json(response.text)
+ text = response.text
+ response.parsed = BaseLLMClient.as_json(text, raise_on_json_error)
+ if response.parsed is None:
+ return text
  return response.parsed

  @logfire_decorators.create_stream_async

{promptbuilder-0.4.39 → promptbuilder-0.4.41}/promptbuilder/llm_client/google_client.py
@@ -273,7 +273,13 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
  thinking_config = self.default_thinking_config
  config.thinking_config = thinking_config

- if result_type is None or result_type == "json":
+ if result_type is None:
+ return await self.client.aio.models.generate_content(
+ model=self.model,
+ contents=messages,
+ config=config,
+ )
+ elif result_type == "json":
  config.response_mime_type = "application/json"
  return await self.client.aio.models.generate_content(
  model=self.model,
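
Previously result_type is None and result_type == "json" shared one branch, so plain-text requests also forced response_mime_type = "application/json" on the Google backend. The split above leaves the mime type unset for plain-text calls. A behavioral sketch (GenerateContentConfig comes from the google-genai SDK this client wraps; treat the exact field defaults as an assumption):

    from google.genai import types

    config = types.GenerateContentConfig()
    result_type = None  # a plain-text request

    # New behavior: only an explicit "json" request sets the JSON mime type.
    if result_type == "json":
        config.response_mime_type = "application/json"

    # For plain text, config.response_mime_type stays None (unconstrained).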

{promptbuilder-0.4.39 → promptbuilder-0.4.41/promptbuilder.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: promptbuilder
- Version: 0.4.39
+ Version: 0.4.41
  Summary: Library for building prompts for LLMs
  Home-page: https://github.com/kapulkin/promptbuilder
  Author: Kapulkin Stanislav

{promptbuilder-0.4.39 → promptbuilder-0.4.41}/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages

  setup(
  name="promptbuilder",
- version="0.4.39",
+ version="0.4.41",
  packages=find_packages(),
  install_requires=[
  "pydantic",