promptbuilder 0.4.40__py3-none-any.whl → 0.4.42__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- promptbuilder/llm_client/base_client.py +25 -11
- promptbuilder/llm_client/google_client.py +188 -17
- {promptbuilder-0.4.40.dist-info → promptbuilder-0.4.42.dist-info}/METADATA +1 -1
- {promptbuilder-0.4.40.dist-info → promptbuilder-0.4.42.dist-info}/RECORD +7 -7
- {promptbuilder-0.4.40.dist-info → promptbuilder-0.4.42.dist-info}/WHEEL +0 -0
- {promptbuilder-0.4.40.dist-info → promptbuilder-0.4.42.dist-info}/licenses/LICENSE +0 -0
- {promptbuilder-0.4.40.dist-info → promptbuilder-0.4.42.dist-info}/top_level.txt +0 -0
promptbuilder/llm_client/base_client.py

@@ -58,7 +58,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         return self.provider + ":" + self.model

     @staticmethod
-    def as_json(text: str) -> Json:
+    def as_json(text: str, raise_on_error: bool = True) -> Json:
         # Remove markdown code block formatting if present
         text = text.strip()

@@ -72,7 +72,9 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         try:
             return json.loads(text, strict=False)
         except json.JSONDecodeError as e:
-            raise ValueError(f"Failed to parse LLM response as JSON:\n{text}")
+            if raise_on_error:
+                raise ValueError(f"Failed to parse LLM response as JSON:\n{text}")
+            return None

     def create(
         self,

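For orientation, here is a minimal sketch (not part of the diff) of how the new raise_on_error flag changes as_json's failure mode:

```python
from promptbuilder.llm_client.base_client import BaseLLMClient

# Well-formed JSON parses exactly as before.
assert BaseLLMClient.as_json('{"answer": 42}') == {"answer": 42}

# Malformed JSON still raises ValueError by default...
try:
    BaseLLMClient.as_json("not json")
except ValueError:
    pass

# ...but can now degrade to None instead of raising.
assert BaseLLMClient.as_json("not json", raise_on_error=False) is None
```
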
@@ -109,7 +111,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         finish_reason = response.candidates[0].finish_reason.value if response.candidates and response.candidates[0].finish_reason else None
         if autocomplete:
             while response.candidates and finish_reason == FinishReason.MAX_TOKENS.value:
-                BaseLLMClient._append_generated_part(messages, response)
+                BaseLLMClient._append_generated_part(messages, response, result_type)

                 response = self._create(
                     messages=messages,

@@ -126,7 +128,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
                 if max_tokens is not None and total_count >= max_tokens:
                     break
             if response.candidates and response.candidates[0].content:
-                appended_message = BaseLLMClient._append_generated_part(messages, response)
+                appended_message = BaseLLMClient._append_generated_part(messages, response, result_type)
                 if appended_message is not None:
                     response.candidates[0].content = appended_message
         return response

@@ -219,6 +221,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         tools: list[Tool] | None = None,
         tool_choice_mode: Literal["ANY", "NONE"] = "NONE",
         autocomplete: bool = False,
+        raise_on_json_error: bool = True,
     ):
         if result_type == "tools":
             response = self.create(

@@ -254,7 +257,10 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
             return response.text
         else:
             if result_type == "json" and response.parsed is None:
-                response.parsed = BaseLLMClient.as_json(response.text)
+                text = response.text
+                response.parsed = BaseLLMClient.as_json(text, raise_on_json_error)
+                if response.parsed is None:
+                    return text
             return response.parsed

     @staticmethod

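Combined with the new raise_on_json_error parameter above, callers of create_value can opt out of the hard failure. A hypothetical usage sketch; the client construction and the handle_unparsed helper are assumptions, not part of this diff:

```python
# `client` is assumed to be some BaseLLMClient subclass instance.
result = client.create_value(
    messages,
    result_type="json",
    raise_on_json_error=False,  # new in 0.4.42
)
if isinstance(result, str):
    # JSON parsing failed, so create_value returned the raw model text
    # instead of raising ValueError.
    handle_unparsed(result)  # hypothetical fallback handler
```
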
@@ -280,7 +286,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         return None, None

     @staticmethod
-    def _append_to_message(message: Content, text: str, is_thought: bool):
+    def _append_to_message(message: Content, text: str, is_thought: bool | None):
         if message.parts and message.parts[-1].text is not None and message.parts[-1].thought == is_thought:
             message.parts[-1].text += text
         else:

@@ -289,14 +295,17 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
             message.parts.append(Part(text=text, thought=is_thought))

     @staticmethod
-    def _append_generated_part(messages: list[Content], response: Response) -> Content | None:
+    def _append_generated_part(messages: list[Content], response: Response, result_type: ResultType = None) -> Content | None:
         response_text, is_thought = BaseLLMClient._responce_to_text(response)
         if response_text is None:
             return None

         if len(messages) > 0 and messages[-1].role == "model":
             message_to_append = messages[-1]
-            BaseLLMClient._append_to_message(message_to_append, response_text, is_thought)
+            if result_type is None or result_type == "str":
+                BaseLLMClient._append_to_message(message_to_append, response_text, is_thought)
+            else:  # json, pydantic model
+                message_to_append.parts = [Part(text=response_text, thought=is_thought)]
         else:
             messages.append(Content(parts=[Part(text=response_text, thought=is_thought)], role="model"))
         return messages[-1]

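For orientation: with plain-text results, autocomplete continuations keep concatenating onto the trailing model part via _append_to_message. A small illustration using the package's own types (the concrete values are invented):

```python
from promptbuilder.llm_client.base_client import BaseLLMClient
from promptbuilder.llm_client.types import Content, Part

msg = Content(parts=[Part(text='{"answer": ', thought=None)], role="model")
BaseLLMClient._append_to_message(msg, '42}', None)
assert msg.parts[-1].text == '{"answer": 42}'
```

For "json" and pydantic result types, _append_generated_part now instead replaces message.parts with a single part holding the response text rather than concatenating onto the previous one.
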
@@ -527,7 +536,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         finish_reason = response.candidates[0].finish_reason.value if response.candidates and response.candidates[0].finish_reason else None
         if autocomplete:
             while response.candidates and finish_reason == FinishReason.MAX_TOKENS.value:
-                BaseLLMClient._append_generated_part(messages, response)
+                BaseLLMClient._append_generated_part(messages, response, result_type)

                 response = await self._create(
                     messages=messages,

@@ -544,7 +553,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
                 if max_tokens is not None and total_count >= max_tokens:
                     break
             if response.candidates and response.candidates[0].content:
-                appended_message = BaseLLMClient._append_generated_part(messages, response)
+                appended_message = BaseLLMClient._append_generated_part(messages, response, result_type)
                 if appended_message is not None:
                     response.candidates[0].content = appended_message
         return response

@@ -595,6 +604,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
         autocomplete: bool = False,
+        raise_on_json_error: bool = True,
     ) -> Json: ...
     @overload
     async def create_value(

@@ -637,6 +647,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         tools: list[Tool] | None = None,
         tool_choice_mode: Literal["ANY", "NONE"] = "NONE",
         autocomplete: bool = False,
+        raise_on_json_error: bool = True,
     ):
         if result_type == "tools":
             response = await self._create(

@@ -671,7 +682,10 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
             return response.text
         else:
             if result_type == "json" and response.parsed is None:
-                response.parsed = BaseLLMClient.as_json(response.text)
+                text = response.text
+                response.parsed = BaseLLMClient.as_json(text, raise_on_json_error)
+                if response.parsed is None:
+                    return text
             return response.parsed

     @logfire_decorators.create_stream_async

promptbuilder/llm_client/google_client.py

@@ -7,7 +7,7 @@ from tenacity import RetryError
 from google.genai import Client, types

 from promptbuilder.llm_client.base_client import BaseLLMClient, BaseLLMClientAsync, ResultType
-from promptbuilder.llm_client.types import Response, Content, Part, ThinkingConfig, Tool, ToolConfig, Model
+from promptbuilder.llm_client.types import Response, Content, Part, ThinkingConfig, Tool, ToolConfig, Model, FunctionCall, FunctionResponse, Blob, FunctionDeclaration, Schema, FunctionCallingConfig, PartLike
 from promptbuilder.llm_client.config import DecoratorConfigs
 from promptbuilder.llm_client.utils import inherited_decorator
 from promptbuilder.llm_client.exceptions import APIError

@@ -16,6 +16,161 @@ from promptbuilder.llm_client.exceptions import APIError
 P = ParamSpec("P")


+def _convert_blob_to_genai(blob: Blob | None) -> types.Blob | None:
+    """Convert custom Blob to google.genai.types.Blob"""
+    if blob is None:
+        return None
+    return types.Blob.model_construct(**blob.__dict__)
+
+
+def _convert_function_call_to_genai(fc: FunctionCall | None) -> types.FunctionCall | None:
+    """Convert custom FunctionCall to google.genai.types.FunctionCall"""
+    if fc is None:
+        return None
+    return types.FunctionCall.model_construct(**fc.__dict__)
+
+
+def _convert_function_response_to_genai(fr: FunctionResponse | None) -> types.FunctionResponse | None:
+    """Convert custom FunctionResponse to google.genai.types.FunctionResponse"""
+    if fr is None:
+        return None
+    return types.FunctionResponse.model_construct(**fr.__dict__)
+
+
+def _convert_part_to_genai(part: Part | PartLike) -> types.Part:
+    """Convert custom Part or PartLike to google.genai.types.Part"""
+    # Handle inline_data conversion
+    inline_data = None
+    if part.inline_data is not None:
+        if isinstance(part.inline_data, Blob):
+            inline_data = _convert_blob_to_genai(part.inline_data)
+        else:
+            # It's already a types.Blob or compatible object
+            inline_data = types.Blob.model_construct(**part.inline_data.__dict__)
+
+    # Handle function_call conversion
+    function_call = None
+    if part.function_call is not None:
+        if isinstance(part.function_call, FunctionCall):
+            function_call = _convert_function_call_to_genai(part.function_call)
+        else:
+            # It's already a compatible type
+            function_call = types.FunctionCall.model_construct(**part.function_call.__dict__)
+
+    # Handle function_response conversion
+    function_response = None
+    if part.function_response is not None:
+        if isinstance(part.function_response, FunctionResponse):
+            function_response = _convert_function_response_to_genai(part.function_response)
+        else:
+            # It's already a compatible type
+            function_response = types.FunctionResponse.model_construct(**part.function_response.__dict__)
+
+    return types.Part.model_construct(
+        text=part.text,
+        function_call=function_call,
+        function_response=function_response,
+        thought=part.thought,
+        inline_data=inline_data,
+    )
+
+
+def _convert_content_to_genai(content: Content) -> types.Content:
+    """Convert custom Content to google.genai.types.Content"""
+    genai_parts: list[types.Part] | None = None
+    if content.parts is not None:
+        genai_parts = [_convert_part_to_genai(p) for p in content.parts]
+    return types.Content.model_construct(
+        role=content.role,
+        parts=genai_parts,
+    )
+
+
+def _convert_messages_to_genai(messages: list[Content]) -> list[types.Content]:
+    """Convert list of custom Content to list of google.genai.types.Content"""
+    return [_convert_content_to_genai(msg) for msg in messages]
+
+
+def _convert_thinking_config_to_genai(thinking_config: ThinkingConfig | None) -> types.ThinkingConfig | None:
+    """Convert custom ThinkingConfig to google.genai.types.ThinkingConfig"""
+    if thinking_config is None:
+        return None
+    return types.ThinkingConfig.model_construct(**thinking_config.__dict__)
+
+
+def _convert_schema_to_genai(schema: Schema | None) -> types.Schema | None:
+    """Convert custom Schema to google.genai.types.Schema"""
+    if schema is None:
+        return None
+    return types.Schema.model_construct(
+        example=schema.example,
+        pattern=schema.pattern,
+        minimum=schema.minimum,
+        default=schema.default,
+        any_of=[_convert_schema_to_genai(s) for s in schema.any_of] if schema.any_of else None,
+        max_length=schema.max_length,
+        title=schema.title,
+        min_length=schema.min_length,
+        min_properties=schema.min_properties,
+        maximum=schema.maximum,
+        max_properties=schema.max_properties,
+        description=schema.description,
+        enum=schema.enum,
+        format=schema.format,
+        items=_convert_schema_to_genai(schema.items),
+        max_items=schema.max_items,
+        min_items=schema.min_items,
+        nullable=schema.nullable,
+        properties={k: _convert_schema_to_genai(v) for k, v in schema.properties.items()} if schema.properties else None,
+        property_ordering=schema.property_ordering,
+        required=schema.required,
+        type=schema.type,
+    )
+
+
+def _convert_function_declaration_to_genai(fd: FunctionDeclaration) -> types.FunctionDeclaration:
+    """Convert custom FunctionDeclaration to google.genai.types.FunctionDeclaration"""
+    return types.FunctionDeclaration.model_construct(
+        response=_convert_schema_to_genai(fd.response),
+        description=fd.description,
+        name=fd.name,
+        parameters=_convert_schema_to_genai(fd.parameters),
+    )
+
+
+def _convert_tool_to_genai(tool: Tool) -> types.Tool:
+    """Convert custom Tool to google.genai.types.Tool"""
+    genai_declarations = None
+    if tool.function_declarations is not None:
+        genai_declarations = [_convert_function_declaration_to_genai(fd) for fd in tool.function_declarations]
+    return types.Tool.model_construct(
+        function_declarations=genai_declarations,
+    )
+
+
+def _convert_tools_to_genai(tools: list[Tool] | None) -> list[types.Tool] | None:
+    """Convert list of custom Tool to list of google.genai.types.Tool"""
+    if tools is None:
+        return None
+    return [_convert_tool_to_genai(t) for t in tools]
+
+
+def _convert_function_calling_config_to_genai(fcc: FunctionCallingConfig | None) -> types.FunctionCallingConfig | None:
+    """Convert custom FunctionCallingConfig to google.genai.types.FunctionCallingConfig"""
+    if fcc is None:
+        return None
+    return types.FunctionCallingConfig.model_construct(**fcc.__dict__)
+
+
+def _convert_tool_config_to_genai(tool_config: ToolConfig | None) -> types.ToolConfig | None:
+    """Convert custom ToolConfig to google.genai.types.ToolConfig"""
+    if tool_config is None:
+        return None
+    return types.ToolConfig.model_construct(
+        function_calling_config=_convert_function_calling_config_to_genai(tool_config.function_calling_config),
+    )
+
+
 @inherited_decorator
 def _error_handler(func: Callable[P, Response]) -> Callable[P, Response]:
     """

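For orientation (not part of the diff): these converters rebuild google-genai's pydantic models via model_construct, which copies fields without running validation. A minimal round-trip sketch, assuming Part's optional fields (inline_data, function_call, function_response) default to None:

```python
from google.genai import types

from promptbuilder.llm_client import google_client
from promptbuilder.llm_client.types import Content, Part

msgs = [Content(parts=[Part(text="Hello", thought=None)], role="user")]
genai_msgs = google_client._convert_messages_to_genai(msgs)

assert isinstance(genai_msgs[0], types.Content)
assert genai_msgs[0].parts[0].text == "Hello"
```

Because model_construct skips validation, malformed field values would only surface when the request is serialized; the trade-off is cheap per-call conversion.
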
@@ -100,13 +255,18 @@ class GoogleLLMClient(BaseLLMClient):
         tool_config: ToolConfig = ToolConfig(),
     ) -> Response:
         messages = self._preprocess_messages(messages)
+        # Convert custom types to google.genai.types
+        genai_messages = _convert_messages_to_genai(messages)
+        genai_tools = _convert_tools_to_genai(tools)
+        genai_tool_config = _convert_tool_config_to_genai(tool_config)
+
         if max_tokens is None:
             max_tokens = self.default_max_tokens
         config = types.GenerateContentConfig(
             system_instruction=system_message,
             max_output_tokens=max_tokens,
-            tools=tools,
-            tool_config=tool_config,
+            tools=genai_tools,
+            tool_config=genai_tool_config,
         )
         if timeout is not None:
             # Google processes timeout via HttpOptions on the request/config

@@ -114,19 +274,19 @@ class GoogleLLMClient(BaseLLMClient):

         if thinking_config is None:
             thinking_config = self.default_thinking_config
-        config.thinking_config = thinking_config
+        config.thinking_config = _convert_thinking_config_to_genai(thinking_config)

         if result_type is None:
             return self.client.models.generate_content(
                 model=self.model,
-                contents=messages,
+                contents=genai_messages,
                 config=config,
             )
         elif result_type == "json":
             config.response_mime_type = "application/json"
             response = self.client.models.generate_content(
                 model=self.model,
-                contents=messages,
+                contents=genai_messages,
                 config=config,
             )
             response.parsed = BaseLLMClient.as_json(response.text)

@@ -136,7 +296,7 @@ class GoogleLLMClient(BaseLLMClient):
             config.response_schema = result_type
             return self.client.models.generate_content(
                 model=self.model,
-                contents=messages,
+                contents=genai_messages,
                 config=config,
             )
         else:

@@ -151,6 +311,9 @@ class GoogleLLMClient(BaseLLMClient):
         system_message: str | None = None,
         max_tokens: int | None = None,
     ) -> Iterator[Response]:
+        # Convert custom types to google.genai.types
+        genai_messages = _convert_messages_to_genai(messages)
+
         if max_tokens is None:
             max_tokens = self.default_max_tokens
         config = types.GenerateContentConfig(

@@ -160,11 +323,11 @@ class GoogleLLMClient(BaseLLMClient):

         if thinking_config is None:
             thinking_config = self.default_thinking_config
-        config.thinking_config = thinking_config
+        config.thinking_config = _convert_thinking_config_to_genai(thinking_config)

         response = self.client.models.generate_content_stream(
             model=self.model,
-            contents=messages,
+            contents=genai_messages,
             config=config,
         )
         return response

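A hypothetical consumption sketch for the streaming path; the public method name create_stream is inferred from the create_stream_async decorator seen earlier in this diff, and the client instance is assumed:

```python
# Messages are now converted to google.genai types inside the client
# before generate_content_stream is called; consuming chunks is unchanged.
for chunk in client.create_stream(messages, system_message="Be brief"):
    print(chunk.text, end="")
```
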
@@ -258,32 +421,37 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
         tool_config: ToolConfig = ToolConfig(),
     ) -> Response:
         messages = GoogleLLMClient._preprocess_messages(messages)
+        # Convert custom types to google.genai.types
+        genai_messages = _convert_messages_to_genai(messages)
+        genai_tools = _convert_tools_to_genai(tools)
+        genai_tool_config = _convert_tool_config_to_genai(tool_config)
+
         if max_tokens is None:
             max_tokens = self.default_max_tokens
         config = types.GenerateContentConfig(
             system_instruction=system_message,
             max_output_tokens=max_tokens,
-            tools=tools,
-            tool_config=tool_config,
+            tools=genai_tools,
+            tool_config=genai_tool_config,
         )
         if timeout is not None:
             config.http_options = types.HttpOptions(timeout=int(timeout * 1_000))

         if thinking_config is None:
             thinking_config = self.default_thinking_config
-        config.thinking_config = thinking_config
+        config.thinking_config = _convert_thinking_config_to_genai(thinking_config)

         if result_type is None:
             return await self.client.aio.models.generate_content(
                 model=self.model,
-                contents=messages,
+                contents=genai_messages,
                 config=config,
             )
         elif result_type == "json":
             config.response_mime_type = "application/json"
             return await self.client.aio.models.generate_content(
                 model=self.model,
-                contents=messages,
+                contents=genai_messages,
                 config=config,
             )
         elif isinstance(result_type, type(BaseModel)):

@@ -291,7 +459,7 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
             config.response_schema = result_type
             return await self.client.aio.models.generate_content(
                 model=self.model,
-                contents=messages,
+                contents=genai_messages,
                 config=config,
             )
         else:

@@ -306,6 +474,9 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
         system_message: str | None = None,
         max_tokens: int | None = None,
     ) -> AsyncIterator[Response]:
+        # Convert custom types to google.genai.types
+        genai_messages = _convert_messages_to_genai(messages)
+
         if max_tokens is None:
             max_tokens = self.default_max_tokens
         config = types.GenerateContentConfig(

@@ -315,11 +486,11 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):

         if thinking_config is None:
             thinking_config = self.default_thinking_config
-        config.thinking_config = thinking_config
+        config.thinking_config = _convert_thinking_config_to_genai(thinking_config)

         response = await self.client.aio.models.generate_content_stream(
             model=self.model,
-            contents=messages,
+            contents=genai_messages,
             config=config,
         )
         return response

{promptbuilder-0.4.40.dist-info → promptbuilder-0.4.42.dist-info}/RECORD

@@ -9,19 +9,19 @@ promptbuilder/agent/utils.py,sha256=vTkphKw04v_QDIJtoB2JKK0RGY6iI1t_0LbmuStunzI,
 promptbuilder/llm_client/__init__.py,sha256=wJ33cnRtZX_YPsbcGxEu3SEZMOhPX7-fHI59MEPUe7I,517
 promptbuilder/llm_client/aisuite_client.py,sha256=8inY3UoH8o9yEOvRYP6a_8pjGQK0W_f9eV8MmHzpKTU,15641
 promptbuilder/llm_client/anthropic_client.py,sha256=YR1Pc4Fj0WpG7qcQnPLkQMzDsvA7SMvWomFR1oCzMsk,28328
-promptbuilder/llm_client/base_client.py,sha256=…
+promptbuilder/llm_client/base_client.py,sha256=tpYnXFxohqoMTTmYUBZrWjRgX__TnD48vym9VHNJplo,40513
 promptbuilder/llm_client/bedrock_client.py,sha256=IQt7Sv_Wt6mg5-bhuyr-Nwjx5Nxk2S8rKEVkfwvWqE0,28183
 promptbuilder/llm_client/config.py,sha256=exQEm35wp7lK5SfXNpN5H9VZEb2LVa4pyZ-cxGt1U-U,1124
 promptbuilder/llm_client/exceptions.py,sha256=t-X7r_a8B1jNu8eEavde1jXu5dz97yV3IG4YHOtgh0Y,4836
-promptbuilder/llm_client/google_client.py,sha256=…
+promptbuilder/llm_client/google_client.py,sha256=u7VhtufVvJAZqTbJ4TQYq798CSabCJMfK7VU2HQDM6I,19926
 promptbuilder/llm_client/litellm_client.py,sha256=czQd1DM1mEj_yM5BQz9oIPtoJG_B2WpQXsqkKDmLyXE,36398
 promptbuilder/llm_client/logfire_decorators.py,sha256=7FvOcMwP87rv4CVySujA_mTZpZyIa7tdA81_OWJqQKQ,9314
 promptbuilder/llm_client/main.py,sha256=2Q7J5FwivX2YwvptzoSEtCfvfcI9p5HC55D3mMb2se4,8243
 promptbuilder/llm_client/openai_client.py,sha256=ulZZsHhyRbqG1WRdVJcNHk6r6MFBC5XLGJQ_2648AkA,28206
 promptbuilder/llm_client/types.py,sha256=fnkSMFjK9ViaRQsD6LILpLz8R2_E1TI9efjy8VNO0RQ,8139
 promptbuilder/llm_client/utils.py,sha256=EtJFghpn27eVUXR-rZj8inEk903tk3thM0r19QxWnBM,13496
-promptbuilder-0.4.40.dist-info/licenses/LICENSE,…
-promptbuilder-0.4.40.dist-info/METADATA,…
-promptbuilder-0.4.40.dist-info/WHEEL,…
-promptbuilder-0.4.40.dist-info/top_level.txt,…
-promptbuilder-0.4.40.dist-info/RECORD,,
+promptbuilder-0.4.42.dist-info/licenses/LICENSE,sha256=fqXmInzgsvEOIaKSBgcrwKyYCGYF0MKErJ0YivtODcc,1096
+promptbuilder-0.4.42.dist-info/METADATA,sha256=fbd1jsCO9oDZ5Rgh37ITWY5u7wcq48d38yTT4ZFH0O8,3824
+promptbuilder-0.4.42.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+promptbuilder-0.4.42.dist-info/top_level.txt,sha256=UBVcYn4UgrPy3O3fmmnPEU_kieuplBMgheetIMei4EI,14
+promptbuilder-0.4.42.dist-info/RECORD,,

(The hashes of the removed 0.4.40 entries are truncated in the source and left elided here.)
The remaining three files (WHEEL, licenses/LICENSE, top_level.txt) have no content changes; only their dist-info directory was renamed.