promptbuilder 0.4.40__tar.gz → 0.4.42__tar.gz
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. The information is provided for informational purposes only.
- {promptbuilder-0.4.40/promptbuilder.egg-info → promptbuilder-0.4.42}/PKG-INFO +1 -1
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/llm_client/base_client.py +25 -11
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/llm_client/google_client.py +188 -17
- {promptbuilder-0.4.40 → promptbuilder-0.4.42/promptbuilder.egg-info}/PKG-INFO +1 -1
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder.egg-info/SOURCES.txt +2 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/setup.py +1 -1
- promptbuilder-0.4.42/tests/test_google_conversions.py +426 -0
- promptbuilder-0.4.42/tests/test_models_list.py +185 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/LICENSE +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/MANIFEST.in +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/Readme.md +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/__init__.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/agent/__init__.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/agent/agent.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/agent/context.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/agent/tool.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/agent/utils.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/embeddings.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/llm_client/__init__.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/llm_client/aisuite_client.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/llm_client/anthropic_client.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/llm_client/bedrock_client.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/llm_client/config.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/llm_client/exceptions.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/llm_client/litellm_client.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/llm_client/logfire_decorators.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/llm_client/main.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/llm_client/openai_client.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/llm_client/types.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/llm_client/utils.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder/prompt_builder.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder.egg-info/dependency_links.txt +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder.egg-info/requires.txt +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/promptbuilder.egg-info/top_level.txt +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/pyproject.toml +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/setup.cfg +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/tests/test_llm_client.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/tests/test_llm_client_async.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/tests/test_timeout_google.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/tests/test_timeout_litellm.py +0 -0
- {promptbuilder-0.4.40 → promptbuilder-0.4.42}/tests/test_timeout_openai.py +0 -0
--- promptbuilder-0.4.40/promptbuilder/llm_client/base_client.py
+++ promptbuilder-0.4.42/promptbuilder/llm_client/base_client.py
@@ -58,7 +58,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         return self.provider + ":" + self.model
 
     @staticmethod
-    def as_json(text: str) -> Json:
+    def as_json(text: str, raise_on_error: bool = True) -> Json:
         # Remove markdown code block formatting if present
         text = text.strip()
 
@@ -72,7 +72,9 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         try:
             return json.loads(text, strict=False)
         except json.JSONDecodeError as e:
-            raise ValueError(f"Failed to parse LLM response as JSON:\n{text}")
+            if raise_on_error:
+                raise ValueError(f"Failed to parse LLM response as JSON:\n{text}")
+            return None
 
     def create(
         self,
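
With the new flag, a parse failure can yield None instead of raising — a minimal sketch (inputs are hypothetical):

    from promptbuilder.llm_client.base_client import BaseLLMClient

    BaseLLMClient.as_json('{"a": 1}')                        # -> {"a": 1}
    BaseLLMClient.as_json("not json", raise_on_error=False)  # -> None instead of ValueError
    # BaseLLMClient.as_json("not json")                      # default still raises ValueError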
@@ -109,7 +111,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         finish_reason = response.candidates[0].finish_reason.value if response.candidates and response.candidates[0].finish_reason else None
         if autocomplete:
             while response.candidates and finish_reason == FinishReason.MAX_TOKENS.value:
-                BaseLLMClient._append_generated_part(messages, response)
+                BaseLLMClient._append_generated_part(messages, response, result_type)
 
                 response = self._create(
                     messages=messages,
@@ -126,7 +128,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
                 if max_tokens is not None and total_count >= max_tokens:
                     break
             if response.candidates and response.candidates[0].content:
-                appended_message = BaseLLMClient._append_generated_part(messages, response)
+                appended_message = BaseLLMClient._append_generated_part(messages, response, result_type)
                 if appended_message is not None:
                     response.candidates[0].content = appended_message
         return response
@@ -219,6 +221,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         tools: list[Tool] | None = None,
         tool_choice_mode: Literal["ANY", "NONE"] = "NONE",
         autocomplete: bool = False,
+        raise_on_json_error: bool = True,
     ):
         if result_type == "tools":
             response = self.create(
@@ -254,7 +257,10 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
             return response.text
         else:
             if result_type == "json" and response.parsed is None:
-                response.parsed = BaseLLMClient.as_json(response.text)
+                text = response.text
+                response.parsed = BaseLLMClient.as_json(text, raise_on_json_error)
+                if response.parsed is None:
+                    return text
             return response.parsed
 
     @staticmethod
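
create_value threads the flag through as raise_on_json_error: when parsing fails and the flag is False, the raw response text is returned instead of an exception. A hypothetical call (the client construction is an assumption, not part of this diff):

    client = ...  # any concrete BaseLLMClient, e.g. a configured Google client
    result = client.create_value(
        messages=[Content(role="user", parts=[Part(text="Answer as JSON")])],
        result_type="json",
        raise_on_json_error=False,  # new in 0.4.42
    )
    # result is the parsed JSON on success, or the raw text if parsing failed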
@@ -280,7 +286,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         return None, None
 
     @staticmethod
-    def _append_to_message(message: Content, text: str, is_thought: bool):
+    def _append_to_message(message: Content, text: str, is_thought: bool | None):
         if message.parts and message.parts[-1].text is not None and message.parts[-1].thought == is_thought:
             message.parts[-1].text += text
         else:
@@ -289,14 +295,17 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
             message.parts.append(Part(text=text, thought=is_thought))
 
     @staticmethod
-    def _append_generated_part(messages: list[Content], response: Response) -> Content | None:
+    def _append_generated_part(messages: list[Content], response: Response, result_type: ResultType = None) -> Content | None:
         response_text, is_thought = BaseLLMClient._responce_to_text(response)
         if response_text is None:
             return None
 
         if len(messages) > 0 and messages[-1].role == "model":
             message_to_append = messages[-1]
-            BaseLLMClient._append_to_message(message_to_append, response_text, is_thought)
+            if result_type is None or result_type == "str":
+                BaseLLMClient._append_to_message(message_to_append, response_text, is_thought)
+            else:  # json, pydantic model
+                message_to_append.parts = [Part(text=response_text, thought=is_thought)]
         else:
             messages.append(Content(parts=[Part(text=response_text, thought=is_thought)], role="model"))
         return messages[-1]
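
A toy illustration of the merge rule _append_to_message applies on the plain-text path (values are hypothetical; for json/pydantic result types the parts are replaced wholesale, as the hunk above shows):

    msg = Content(role="model", parts=[Part(text='{"items": [1, ', thought=None)])
    BaseLLMClient._append_to_message(msg, '2, 3]}', is_thought=None)  # same thought flag -> merged
    assert len(msg.parts) == 1 and msg.parts[0].text == '{"items": [1, 2, 3]}'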
@@ -527,7 +536,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         finish_reason = response.candidates[0].finish_reason.value if response.candidates and response.candidates[0].finish_reason else None
         if autocomplete:
             while response.candidates and finish_reason == FinishReason.MAX_TOKENS.value:
-                BaseLLMClient._append_generated_part(messages, response)
+                BaseLLMClient._append_generated_part(messages, response, result_type)
 
                 response = await self._create(
                     messages=messages,
@@ -544,7 +553,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
                 if max_tokens is not None and total_count >= max_tokens:
                     break
             if response.candidates and response.candidates[0].content:
-                appended_message = BaseLLMClient._append_generated_part(messages, response)
+                appended_message = BaseLLMClient._append_generated_part(messages, response, result_type)
                 if appended_message is not None:
                     response.candidates[0].content = appended_message
         return response
@@ -595,6 +604,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
         autocomplete: bool = False,
+        raise_on_json_error: bool = True,
     ) -> Json: ...
     @overload
     async def create_value(
@@ -637,6 +647,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         tools: list[Tool] | None = None,
         tool_choice_mode: Literal["ANY", "NONE"] = "NONE",
         autocomplete: bool = False,
+        raise_on_json_error: bool = True,
     ):
         if result_type == "tools":
             response = await self._create(
@@ -671,7 +682,10 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
             return response.text
         else:
             if result_type == "json" and response.parsed is None:
-                response.parsed = BaseLLMClient.as_json(response.text)
+                text = response.text
+                response.parsed = BaseLLMClient.as_json(text, raise_on_json_error)
+                if response.parsed is None:
+                    return text
             return response.parsed
 
     @logfire_decorators.create_stream_async
--- promptbuilder-0.4.40/promptbuilder/llm_client/google_client.py
+++ promptbuilder-0.4.42/promptbuilder/llm_client/google_client.py
@@ -7,7 +7,7 @@ from tenacity import RetryError
 from google.genai import Client, types
 
 from promptbuilder.llm_client.base_client import BaseLLMClient, BaseLLMClientAsync, ResultType
-from promptbuilder.llm_client.types import Response, Content, Part, ThinkingConfig, Tool, ToolConfig, Model
+from promptbuilder.llm_client.types import Response, Content, Part, ThinkingConfig, Tool, ToolConfig, Model, FunctionCall, FunctionResponse, Blob, FunctionDeclaration, Schema, FunctionCallingConfig, PartLike
 from promptbuilder.llm_client.config import DecoratorConfigs
 from promptbuilder.llm_client.utils import inherited_decorator
 from promptbuilder.llm_client.exceptions import APIError
@@ -16,6 +16,161 @@ from promptbuilder.llm_client.exceptions import APIError
 P = ParamSpec("P")
 
 
+def _convert_blob_to_genai(blob: Blob | None) -> types.Blob | None:
+    """Convert custom Blob to google.genai.types.Blob"""
+    if blob is None:
+        return None
+    return types.Blob.model_construct(**blob.__dict__)
+
+
+def _convert_function_call_to_genai(fc: FunctionCall | None) -> types.FunctionCall | None:
+    """Convert custom FunctionCall to google.genai.types.FunctionCall"""
+    if fc is None:
+        return None
+    return types.FunctionCall.model_construct(**fc.__dict__)
+
+
+def _convert_function_response_to_genai(fr: FunctionResponse | None) -> types.FunctionResponse | None:
+    """Convert custom FunctionResponse to google.genai.types.FunctionResponse"""
+    if fr is None:
+        return None
+    return types.FunctionResponse.model_construct(**fr.__dict__)
+
+
+def _convert_part_to_genai(part: Part | PartLike) -> types.Part:
+    """Convert custom Part or PartLike to google.genai.types.Part"""
+    # Handle inline_data conversion
+    inline_data = None
+    if part.inline_data is not None:
+        if isinstance(part.inline_data, Blob):
+            inline_data = _convert_blob_to_genai(part.inline_data)
+        else:
+            # It's already a types.Blob or compatible object
+            inline_data = types.Blob.model_construct(**part.inline_data.__dict__)
+
+    # Handle function_call conversion
+    function_call = None
+    if part.function_call is not None:
+        if isinstance(part.function_call, FunctionCall):
+            function_call = _convert_function_call_to_genai(part.function_call)
+        else:
+            # It's already a compatible type
+            function_call = types.FunctionCall.model_construct(**part.function_call.__dict__)
+
+    # Handle function_response conversion
+    function_response = None
+    if part.function_response is not None:
+        if isinstance(part.function_response, FunctionResponse):
+            function_response = _convert_function_response_to_genai(part.function_response)
+        else:
+            # It's already a compatible type
+            function_response = types.FunctionResponse.model_construct(**part.function_response.__dict__)
+
+    return types.Part.model_construct(
+        text=part.text,
+        function_call=function_call,
+        function_response=function_response,
+        thought=part.thought,
+        inline_data=inline_data,
+    )
+
+
+def _convert_content_to_genai(content: Content) -> types.Content:
+    """Convert custom Content to google.genai.types.Content"""
+    genai_parts: list[types.Part] | None = None
+    if content.parts is not None:
+        genai_parts = [_convert_part_to_genai(p) for p in content.parts]
+    return types.Content.model_construct(
+        role=content.role,
+        parts=genai_parts,
+    )
+
+
+def _convert_messages_to_genai(messages: list[Content]) -> list[types.Content]:
+    """Convert list of custom Content to list of google.genai.types.Content"""
+    return [_convert_content_to_genai(msg) for msg in messages]
+
+
+def _convert_thinking_config_to_genai(thinking_config: ThinkingConfig | None) -> types.ThinkingConfig | None:
+    """Convert custom ThinkingConfig to google.genai.types.ThinkingConfig"""
+    if thinking_config is None:
+        return None
+    return types.ThinkingConfig.model_construct(**thinking_config.__dict__)
+
+
+def _convert_schema_to_genai(schema: Schema | None) -> types.Schema | None:
+    """Convert custom Schema to google.genai.types.Schema"""
+    if schema is None:
+        return None
+    return types.Schema.model_construct(
+        example=schema.example,
+        pattern=schema.pattern,
+        minimum=schema.minimum,
+        default=schema.default,
+        any_of=[_convert_schema_to_genai(s) for s in schema.any_of] if schema.any_of else None,
+        max_length=schema.max_length,
+        title=schema.title,
+        min_length=schema.min_length,
+        min_properties=schema.min_properties,
+        maximum=schema.maximum,
+        max_properties=schema.max_properties,
+        description=schema.description,
+        enum=schema.enum,
+        format=schema.format,
+        items=_convert_schema_to_genai(schema.items),
+        max_items=schema.max_items,
+        min_items=schema.min_items,
+        nullable=schema.nullable,
+        properties={k: _convert_schema_to_genai(v) for k, v in schema.properties.items()} if schema.properties else None,
+        property_ordering=schema.property_ordering,
+        required=schema.required,
+        type=schema.type,
+    )
+
+
+def _convert_function_declaration_to_genai(fd: FunctionDeclaration) -> types.FunctionDeclaration:
+    """Convert custom FunctionDeclaration to google.genai.types.FunctionDeclaration"""
+    return types.FunctionDeclaration.model_construct(
+        response=_convert_schema_to_genai(fd.response),
+        description=fd.description,
+        name=fd.name,
+        parameters=_convert_schema_to_genai(fd.parameters),
+    )
+
+
+def _convert_tool_to_genai(tool: Tool) -> types.Tool:
+    """Convert custom Tool to google.genai.types.Tool"""
+    genai_declarations = None
+    if tool.function_declarations is not None:
+        genai_declarations = [_convert_function_declaration_to_genai(fd) for fd in tool.function_declarations]
+    return types.Tool.model_construct(
+        function_declarations=genai_declarations,
+    )
+
+
+def _convert_tools_to_genai(tools: list[Tool] | None) -> list[types.Tool] | None:
+    """Convert list of custom Tool to list of google.genai.types.Tool"""
+    if tools is None:
+        return None
+    return [_convert_tool_to_genai(t) for t in tools]
+
+
+def _convert_function_calling_config_to_genai(fcc: FunctionCallingConfig | None) -> types.FunctionCallingConfig | None:
+    """Convert custom FunctionCallingConfig to google.genai.types.FunctionCallingConfig"""
+    if fcc is None:
+        return None
+    return types.FunctionCallingConfig.model_construct(**fcc.__dict__)
+
+
+def _convert_tool_config_to_genai(tool_config: ToolConfig | None) -> types.ToolConfig | None:
+    """Convert custom ToolConfig to google.genai.types.ToolConfig"""
+    if tool_config is None:
+        return None
+    return types.ToolConfig.model_construct(
+        function_calling_config=_convert_function_calling_config_to_genai(tool_config.function_calling_config),
+    )
+
+
 @inherited_decorator
 def _error_handler(func: Callable[P, Response]) -> Callable[P, Response]:
     """
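
Net effect: every pydantic object promptbuilder defines is rebuilt as its google.genai twin before touching the SDK. A minimal round-trip sketch, mirroring the new tests:

    from google.genai import types
    from promptbuilder.llm_client.types import Content, Part
    from promptbuilder.llm_client.google_client import _convert_messages_to_genai

    genai_messages = _convert_messages_to_genai([Content(role="user", parts=[Part(text="Hello")])])
    assert isinstance(genai_messages[0], types.Content)  # ready to pass as contents=...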
@@ -100,13 +255,18 @@ class GoogleLLMClient(BaseLLMClient):
         tool_config: ToolConfig = ToolConfig(),
     ) -> Response:
         messages = self._preprocess_messages(messages)
+        # Convert custom types to google.genai.types
+        genai_messages = _convert_messages_to_genai(messages)
+        genai_tools = _convert_tools_to_genai(tools)
+        genai_tool_config = _convert_tool_config_to_genai(tool_config)
+
         if max_tokens is None:
             max_tokens = self.default_max_tokens
         config = types.GenerateContentConfig(
             system_instruction=system_message,
             max_output_tokens=max_tokens,
-            tools=tools,
-            tool_config=tool_config,
+            tools=genai_tools,
+            tool_config=genai_tool_config,
         )
         if timeout is not None:
             # Google processes timeout via HttpOptions on the request/config
@@ -114,19 +274,19 @@ class GoogleLLMClient(BaseLLMClient):
 
         if thinking_config is None:
             thinking_config = self.default_thinking_config
-        config.thinking_config = thinking_config
+        config.thinking_config = _convert_thinking_config_to_genai(thinking_config)
 
         if result_type is None:
             return self.client.models.generate_content(
                 model=self.model,
-                contents=messages,
+                contents=genai_messages,
                 config=config,
             )
         elif result_type == "json":
             config.response_mime_type = "application/json"
             response = self.client.models.generate_content(
                 model=self.model,
-                contents=messages,
+                contents=genai_messages,
                 config=config,
             )
             response.parsed = BaseLLMClient.as_json(response.text)
@@ -136,7 +296,7 @@ class GoogleLLMClient(BaseLLMClient):
             config.response_schema = result_type
             return self.client.models.generate_content(
                 model=self.model,
-                contents=messages,
+                contents=genai_messages,
                 config=config,
             )
         else:
@@ -151,6 +311,9 @@ class GoogleLLMClient(BaseLLMClient):
         system_message: str | None = None,
         max_tokens: int | None = None,
     ) -> Iterator[Response]:
+        # Convert custom types to google.genai.types
+        genai_messages = _convert_messages_to_genai(messages)
+
         if max_tokens is None:
             max_tokens = self.default_max_tokens
         config = types.GenerateContentConfig(
@@ -160,11 +323,11 @@ class GoogleLLMClient(BaseLLMClient):
 
         if thinking_config is None:
             thinking_config = self.default_thinking_config
-        config.thinking_config = thinking_config
+        config.thinking_config = _convert_thinking_config_to_genai(thinking_config)
 
         response = self.client.models.generate_content_stream(
             model=self.model,
-            contents=messages,
+            contents=genai_messages,
             config=config,
         )
         return response
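
The same guard now covers every spot a ThinkingConfig reaches GenerateContentConfig; a sketch grounded in the new tests:

    from google.genai import types
    from promptbuilder.llm_client.types import ThinkingConfig
    from promptbuilder.llm_client.google_client import _convert_thinking_config_to_genai

    tc = _convert_thinking_config_to_genai(ThinkingConfig(include_thoughts=True, thinking_budget=500))
    assert isinstance(tc, types.ThinkingConfig)
    assert _convert_thinking_config_to_genai(None) is None  # an unset config stays None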
@@ -258,32 +421,37 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
         tool_config: ToolConfig = ToolConfig(),
     ) -> Response:
         messages = GoogleLLMClient._preprocess_messages(messages)
+        # Convert custom types to google.genai.types
+        genai_messages = _convert_messages_to_genai(messages)
+        genai_tools = _convert_tools_to_genai(tools)
+        genai_tool_config = _convert_tool_config_to_genai(tool_config)
+
         if max_tokens is None:
             max_tokens = self.default_max_tokens
         config = types.GenerateContentConfig(
             system_instruction=system_message,
             max_output_tokens=max_tokens,
-            tools=tools,
-            tool_config=tool_config,
+            tools=genai_tools,
+            tool_config=genai_tool_config,
         )
         if timeout is not None:
             config.http_options = types.HttpOptions(timeout=int(timeout * 1_000))
 
         if thinking_config is None:
             thinking_config = self.default_thinking_config
-        config.thinking_config = thinking_config
+        config.thinking_config = _convert_thinking_config_to_genai(thinking_config)
 
         if result_type is None:
             return await self.client.aio.models.generate_content(
                 model=self.model,
-                contents=messages,
+                contents=genai_messages,
                 config=config,
             )
         elif result_type == "json":
             config.response_mime_type = "application/json"
             return await self.client.aio.models.generate_content(
                 model=self.model,
-                contents=messages,
+                contents=genai_messages,
                 config=config,
             )
         elif isinstance(result_type, type(BaseModel)):
@@ -291,7 +459,7 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
             config.response_schema = result_type
             return await self.client.aio.models.generate_content(
                 model=self.model,
-                contents=messages,
+                contents=genai_messages,
                 config=config,
             )
         else:
@@ -306,6 +474,9 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
         system_message: str | None = None,
         max_tokens: int | None = None,
     ) -> AsyncIterator[Response]:
+        # Convert custom types to google.genai.types
+        genai_messages = _convert_messages_to_genai(messages)
+
         if max_tokens is None:
             max_tokens = self.default_max_tokens
         config = types.GenerateContentConfig(
@@ -315,11 +486,11 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
 
         if thinking_config is None:
             thinking_config = self.default_thinking_config
-        config.thinking_config = thinking_config
+        config.thinking_config = _convert_thinking_config_to_genai(thinking_config)
 
         response = await self.client.aio.models.generate_content_stream(
             model=self.model,
-            contents=messages,
+            contents=genai_messages,
             config=config,
         )
         return response
--- promptbuilder-0.4.40/promptbuilder.egg-info/SOURCES.txt
+++ promptbuilder-0.4.42/promptbuilder.egg-info/SOURCES.txt
@@ -30,8 +30,10 @@ promptbuilder/llm_client/main.py
 promptbuilder/llm_client/openai_client.py
 promptbuilder/llm_client/types.py
 promptbuilder/llm_client/utils.py
+tests/test_google_conversions.py
 tests/test_llm_client.py
 tests/test_llm_client_async.py
+tests/test_models_list.py
 tests/test_timeout_google.py
 tests/test_timeout_litellm.py
 tests/test_timeout_openai.py
--- /dev/null
+++ promptbuilder-0.4.42/tests/test_google_conversions.py
@@ -0,0 +1,426 @@
(new file; the 426 added lines are reproduced below without the leading "+")

"""
Tests for conversion functions in google_client.py
Verifies that custom types' __dict__ contains exactly the fields needed for google.genai.types
"""
import pytest
from google.genai import types as genai_types

from promptbuilder.llm_client.types import (
    Blob,
    FunctionCall,
    FunctionResponse,
    Part,
    Content,
    ThinkingConfig,
    Schema,
    FunctionDeclaration,
    Tool,
    FunctionCallingConfig,
    ToolConfig,
)
from promptbuilder.llm_client.google_client import (
    _convert_blob_to_genai,
    _convert_function_call_to_genai,
    _convert_function_response_to_genai,
    _convert_part_to_genai,
    _convert_content_to_genai,
    _convert_thinking_config_to_genai,
    _convert_schema_to_genai,
    _convert_function_declaration_to_genai,
    _convert_tool_to_genai,
    _convert_function_calling_config_to_genai,
    _convert_tool_config_to_genai,
    _convert_messages_to_genai,
    _convert_tools_to_genai,
)


def get_pydantic_fields(cls) -> set[str]:
    """Get field names from a Pydantic model class using the non-deprecated approach."""
    return set(cls.__pydantic_fields__.keys())


class TestFieldCompatibility:
    """Test that custom types have compatible fields with google.genai.types"""

    def test_blob_fields_match(self):
        """Blob __dict__ should have fields compatible with genai_types.Blob"""
        custom_fields = get_pydantic_fields(Blob)
        genai_fields = get_pydantic_fields(genai_types.Blob)

        # Our custom type may have extra fields (like display_name) that genai doesn't have
        # But all genai fields should be in our custom type or we handle them
        blob = Blob(data=b"test", mime_type="text/plain", display_name="test.txt")

        # Check that conversion works
        result = _convert_blob_to_genai(blob)
        assert isinstance(result, genai_types.Blob)
        assert result.data == b"test"
        assert result.mime_type == "text/plain"

    def test_function_call_fields_match(self):
        """FunctionCall __dict__ should have fields compatible with genai_types.FunctionCall"""
        custom_fields = get_pydantic_fields(FunctionCall)
        genai_fields = get_pydantic_fields(genai_types.FunctionCall)

        fc = FunctionCall(id="123", args={"key": "value"}, name="test_func")

        result = _convert_function_call_to_genai(fc)
        assert isinstance(result, genai_types.FunctionCall)
        assert result.id == "123"
        assert result.args == {"key": "value"}
        assert result.name == "test_func"

    def test_function_response_fields_match(self):
        """FunctionResponse __dict__ should have fields compatible with genai_types.FunctionResponse"""
        fr = FunctionResponse(id="456", name="test_func", response={"result": 42})

        result = _convert_function_response_to_genai(fr)
        assert isinstance(result, genai_types.FunctionResponse)
        assert result.id == "456"
        assert result.name == "test_func"
        assert result.response == {"result": 42}

    def test_thinking_config_fields_match(self):
        """ThinkingConfig __dict__ should have fields compatible with genai_types.ThinkingConfig"""
        tc = ThinkingConfig(include_thoughts=True, thinking_budget=1000)

        result = _convert_thinking_config_to_genai(tc)
        assert isinstance(result, genai_types.ThinkingConfig)
        assert result.include_thoughts == True
        assert result.thinking_budget == 1000

    def test_function_calling_config_fields_match(self):
        """FunctionCallingConfig __dict__ should have fields compatible with genai_types.FunctionCallingConfig"""
        fcc = FunctionCallingConfig(mode="AUTO", allowed_function_names=["func1", "func2"])

        result = _convert_function_calling_config_to_genai(fcc)
        assert isinstance(result, genai_types.FunctionCallingConfig)
        assert result.mode == "AUTO"
        assert result.allowed_function_names == ["func1", "func2"]


class TestConversionFunctions:
    """Test that conversion functions produce correct results"""

    def test_convert_blob_none(self):
        assert _convert_blob_to_genai(None) is None

    def test_convert_blob_with_data(self):
        blob = Blob(data=b"hello", mime_type="text/plain")
        result = _convert_blob_to_genai(blob)
        assert isinstance(result, genai_types.Blob)
        assert result.data == b"hello"
        assert result.mime_type == "text/plain"

    def test_convert_function_call_none(self):
        assert _convert_function_call_to_genai(None) is None

    def test_convert_function_call_with_data(self):
        fc = FunctionCall(id="test-id", name="my_function", args={"x": 1})
        result = _convert_function_call_to_genai(fc)
        assert isinstance(result, genai_types.FunctionCall)
        assert result.id == "test-id"
        assert result.name == "my_function"
        assert result.args == {"x": 1}

    def test_convert_function_response_none(self):
        assert _convert_function_response_to_genai(None) is None

    def test_convert_function_response_with_data(self):
        fr = FunctionResponse(id="resp-id", name="my_function", response={"output": "success"})
        result = _convert_function_response_to_genai(fr)
        assert isinstance(result, genai_types.FunctionResponse)
        assert result.id == "resp-id"
        assert result.name == "my_function"
        assert result.response == {"output": "success"}

    def test_convert_part_text_only(self):
        part = Part(text="Hello world")
        result = _convert_part_to_genai(part)
        assert isinstance(result, genai_types.Part)
        assert result.text == "Hello world"
        assert result.function_call is None
        assert result.function_response is None
        assert result.inline_data is None

    def test_convert_part_with_function_call(self):
        fc = FunctionCall(id="fc-1", name="calc", args={"a": 1, "b": 2})
        part = Part(function_call=fc)
        result = _convert_part_to_genai(part)
        assert isinstance(result, genai_types.Part)
        assert result.function_call is not None
        assert isinstance(result.function_call, genai_types.FunctionCall)
        assert result.function_call.name == "calc"

    def test_convert_part_with_inline_data(self):
        blob = Blob(data=b"image data", mime_type="image/png")
        part = Part(inline_data=blob)
        result = _convert_part_to_genai(part)
        assert isinstance(result, genai_types.Part)
        assert result.inline_data is not None
        assert isinstance(result.inline_data, genai_types.Blob)
        assert result.inline_data.mime_type == "image/png"

    def test_convert_part_with_thought(self):
        part = Part(text="thinking...", thought=True)
        result = _convert_part_to_genai(part)
        assert isinstance(result, genai_types.Part)
        assert result.text == "thinking..."
        assert result.thought == True

    def test_convert_content_simple(self):
        content = Content(role="user", parts=[Part(text="Hello")])
        result = _convert_content_to_genai(content)
        assert isinstance(result, genai_types.Content)
        assert result.role == "user"
        assert len(result.parts) == 1
        assert isinstance(result.parts[0], genai_types.Part)
        assert result.parts[0].text == "Hello"

    def test_convert_content_multiple_parts(self):
        content = Content(
            role="model",
            parts=[
                Part(text="Part 1"),
                Part(text="Part 2"),
            ]
        )
        result = _convert_content_to_genai(content)
        assert isinstance(result, genai_types.Content)
        assert result.role == "model"
        assert len(result.parts) == 2

    def test_convert_content_no_parts(self):
        content = Content(role="user", parts=None)
        result = _convert_content_to_genai(content)
        assert isinstance(result, genai_types.Content)
        assert result.parts is None

    def test_convert_messages(self):
        messages = [
            Content(role="user", parts=[Part(text="Hi")]),
            Content(role="model", parts=[Part(text="Hello!")]),
        ]
        result = _convert_messages_to_genai(messages)
        assert len(result) == 2
        assert all(isinstance(m, genai_types.Content) for m in result)

    def test_convert_thinking_config_none(self):
        assert _convert_thinking_config_to_genai(None) is None

    def test_convert_thinking_config_with_data(self):
        tc = ThinkingConfig(include_thoughts=True, thinking_budget=500)
        result = _convert_thinking_config_to_genai(tc)
        assert isinstance(result, genai_types.ThinkingConfig)
        assert result.include_thoughts == True
        assert result.thinking_budget == 500

    def test_convert_schema_none(self):
        assert _convert_schema_to_genai(None) is None

    def test_convert_schema_simple(self):
        schema = Schema(type="string", description="A name")
        result = _convert_schema_to_genai(schema)
        assert isinstance(result, genai_types.Schema)
        assert result.type == "string"
        assert result.description == "A name"

    def test_convert_schema_with_properties(self):
        schema = Schema(
            type="object",
            properties={
                "name": Schema(type="string"),
                "age": Schema(type="integer"),
            },
            required=["name"]
        )
        result = _convert_schema_to_genai(schema)
        assert isinstance(result, genai_types.Schema)
        assert result.type == "object"
        assert "name" in result.properties
        assert "age" in result.properties
        assert isinstance(result.properties["name"], genai_types.Schema)
        assert result.required == ["name"]

    def test_convert_schema_with_items(self):
        schema = Schema(
            type="array",
            items=Schema(type="string")
        )
        result = _convert_schema_to_genai(schema)
        assert isinstance(result, genai_types.Schema)
        assert result.type == "array"
        assert isinstance(result.items, genai_types.Schema)
        assert result.items.type == "string"

    def test_convert_function_declaration(self):
        fd = FunctionDeclaration(
            name="get_weather",
            description="Get weather for a location",
            parameters=Schema(
                type="object",
                properties={
                    "location": Schema(type="string", description="City name"),
                },
                required=["location"]
            )
        )
        result = _convert_function_declaration_to_genai(fd)
        assert isinstance(result, genai_types.FunctionDeclaration)
        assert result.name == "get_weather"
        assert result.description == "Get weather for a location"
        assert isinstance(result.parameters, genai_types.Schema)

    def test_convert_tool(self):
        tool = Tool(
            function_declarations=[
                FunctionDeclaration(name="func1", description="First function"),
                FunctionDeclaration(name="func2", description="Second function"),
            ]
        )
        result = _convert_tool_to_genai(tool)
        assert isinstance(result, genai_types.Tool)
        assert len(result.function_declarations) == 2
        assert all(isinstance(fd, genai_types.FunctionDeclaration) for fd in result.function_declarations)

    def test_convert_tool_no_declarations(self):
        tool = Tool(function_declarations=None)
        result = _convert_tool_to_genai(tool)
        assert isinstance(result, genai_types.Tool)
        assert result.function_declarations is None

    def test_convert_tools_none(self):
        assert _convert_tools_to_genai(None) is None

    def test_convert_tools_list(self):
        tools = [
            Tool(function_declarations=[FunctionDeclaration(name="f1")]),
            Tool(function_declarations=[FunctionDeclaration(name="f2")]),
        ]
        result = _convert_tools_to_genai(tools)
        assert len(result) == 2
        assert all(isinstance(t, genai_types.Tool) for t in result)

    def test_convert_function_calling_config_none(self):
        assert _convert_function_calling_config_to_genai(None) is None

    def test_convert_function_calling_config_with_data(self):
        fcc = FunctionCallingConfig(mode="ANY", allowed_function_names=["allowed_func"])
        result = _convert_function_calling_config_to_genai(fcc)
        assert isinstance(result, genai_types.FunctionCallingConfig)
        assert result.mode == "ANY"
        assert result.allowed_function_names == ["allowed_func"]

    def test_convert_tool_config_none(self):
        assert _convert_tool_config_to_genai(None) is None

    def test_convert_tool_config_with_data(self):
        tc = ToolConfig(
            function_calling_config=FunctionCallingConfig(mode="NONE")
        )
        result = _convert_tool_config_to_genai(tc)
        assert isinstance(result, genai_types.ToolConfig)
        assert isinstance(result.function_calling_config, genai_types.FunctionCallingConfig)
        assert result.function_calling_config.mode == "NONE"


class TestDictFieldsExactMatch:
    """
    Test that __dict__ of custom types contains exactly the fields needed
    for model_construct to work with google.genai.types
    """

    def test_blob_dict_fields(self):
        """Verify Blob.__dict__ contains only expected fields"""
        blob = Blob(data=b"test", mime_type="text/plain", display_name="file.txt")
        dict_keys = set(blob.__dict__.keys())
        expected_keys = {"data", "mime_type", "display_name"}
        assert dict_keys == expected_keys

    def test_function_call_dict_fields(self):
        """Verify FunctionCall.__dict__ contains only expected fields"""
        fc = FunctionCall(id="1", args={"a": 1}, name="test")
        dict_keys = set(fc.__dict__.keys())
        expected_keys = {"id", "args", "name"}
        assert dict_keys == expected_keys

    def test_function_response_dict_fields(self):
        """Verify FunctionResponse.__dict__ contains only expected fields"""
        fr = FunctionResponse(id="1", name="test", response={"x": 1})
        dict_keys = set(fr.__dict__.keys())
        expected_keys = {"id", "name", "response"}
        assert dict_keys == expected_keys

    def test_thinking_config_dict_fields(self):
        """Verify ThinkingConfig.__dict__ contains only expected fields"""
        tc = ThinkingConfig(include_thoughts=True, thinking_budget=100)
        dict_keys = set(tc.__dict__.keys())
        expected_keys = {"include_thoughts", "thinking_budget"}
        assert dict_keys == expected_keys

    def test_function_calling_config_dict_fields(self):
        """Verify FunctionCallingConfig.__dict__ contains only expected fields"""
        fcc = FunctionCallingConfig(mode="AUTO", allowed_function_names=["f"])
        dict_keys = set(fcc.__dict__.keys())
        expected_keys = {"mode", "allowed_function_names"}
        assert dict_keys == expected_keys

    def test_part_dict_fields(self):
        """Verify Part.__dict__ contains only expected fields"""
        part = Part(text="hello", thought=True, function_call=None, function_response=None, inline_data=None)
        dict_keys = set(part.__dict__.keys())
        expected_keys = {"text", "function_call", "function_response", "thought", "inline_data"}
        assert dict_keys == expected_keys

    def test_content_dict_fields(self):
        """Verify Content.__dict__ contains only expected fields"""
        content = Content(role="user", parts=[Part(text="hi")])
        dict_keys = set(content.__dict__.keys())
        expected_keys = {"role", "parts"}
        assert dict_keys == expected_keys


class TestModelConstructCompatibility:
    """
    Test that model_construct(**obj.__dict__) works correctly
    by verifying genai types accept our custom type's __dict__
    """

    def test_blob_model_construct_works(self):
        """genai_types.Blob.model_construct should work with Blob.__dict__"""
        blob = Blob(data=b"test", mime_type="text/plain")
        # This should not raise
        result = genai_types.Blob.model_construct(**blob.__dict__)
        assert result.data == b"test"
        assert result.mime_type == "text/plain"

    def test_function_call_model_construct_works(self):
        """genai_types.FunctionCall.model_construct should work with FunctionCall.__dict__"""
        fc = FunctionCall(id="test", args={"x": 1}, name="func")
        result = genai_types.FunctionCall.model_construct(**fc.__dict__)
        assert result.id == "test"
        assert result.args == {"x": 1}
        assert result.name == "func"

    def test_function_response_model_construct_works(self):
        """genai_types.FunctionResponse.model_construct should work with FunctionResponse.__dict__"""
        fr = FunctionResponse(id="resp", name="func", response={"out": 42})
        result = genai_types.FunctionResponse.model_construct(**fr.__dict__)
        assert result.id == "resp"
        assert result.name == "func"
        assert result.response == {"out": 42}

    def test_thinking_config_model_construct_works(self):
        """genai_types.ThinkingConfig.model_construct should work with ThinkingConfig.__dict__"""
        tc = ThinkingConfig(include_thoughts=True, thinking_budget=200)
        result = genai_types.ThinkingConfig.model_construct(**tc.__dict__)
        assert result.include_thoughts == True
        assert result.thinking_budget == 200

    def test_function_calling_config_model_construct_works(self):
        """genai_types.FunctionCallingConfig.model_construct should work with FunctionCallingConfig.__dict__"""
        fcc = FunctionCallingConfig(mode="AUTO", allowed_function_names=["f1", "f2"])
        result = genai_types.FunctionCallingConfig.model_construct(**fcc.__dict__)
        assert result.mode == "AUTO"
        assert result.allowed_function_names == ["f1", "f2"]
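
The three test classes pin down one invariant from different angles: a custom type's __dict__ must line up field-for-field with its genai counterpart, because model_construct performs no validation and simply trusts its keyword arguments. A hypothetical one-liner capturing the same check:

    from google.genai import types as genai_types
    from promptbuilder.llm_client.types import FunctionCall

    fc = FunctionCall(id="1", args={"a": 1}, name="test")
    assert set(fc.__dict__) <= set(genai_types.FunctionCall.__pydantic_fields__)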
--- /dev/null
+++ promptbuilder-0.4.42/tests/test_models_list.py
@@ -0,0 +1,185 @@
(new file; the 185 added lines are reproduced below without the leading "+")

import pytest
from unittest.mock import Mock, patch, MagicMock
from promptbuilder.llm_client.main import get_models_list
from promptbuilder.llm_client.types import Model


def test_get_models_list_all_providers():
    """Test that get_models_list returns models from all providers when no provider specified."""
    with patch('promptbuilder.llm_client.google_client.GoogleLLMClient.models_list') as mock_google, \
         patch('promptbuilder.llm_client.anthropic_client.AnthropicLLMClient.models_list') as mock_anthropic, \
         patch('promptbuilder.llm_client.openai_client.OpenaiLLMClient.models_list') as mock_openai, \
         patch('promptbuilder.llm_client.bedrock_client.BedrockLLMClient.models_list') as mock_bedrock:

        # Setup mock returns
        mock_google.return_value = [
            Model(full_model_name="google:gemini-1.5-flash", provider="google", model="gemini-1.5-flash", display_name="Gemini 1.5 Flash")
        ]
        mock_anthropic.return_value = [
            Model(full_model_name="anthropic:claude-3-opus-20240229", provider="anthropic", model="claude-3-opus-20240229", display_name="Claude 3 Opus")
        ]
        mock_openai.return_value = [
            Model(full_model_name="openai:gpt-4", provider="openai", model="gpt-4")
        ]
        mock_bedrock.return_value = [
            Model(full_model_name="bedrock:arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-sonnet-20240229-v1:0",
                  provider="bedrock",
                  model="arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-sonnet-20240229-v1:0",
                  display_name="Claude 3 Sonnet")
        ]

        # Call the function
        result = get_models_list()

        # Assertions
        assert len(result) == 4
        assert all(isinstance(model, Model) for model in result)
        assert any(model.provider == "google" for model in result)
        assert any(model.provider == "anthropic" for model in result)
        assert any(model.provider == "openai" for model in result)
        assert any(model.provider == "bedrock" for model in result)

        # Verify all mocks were called
        mock_google.assert_called_once()
        mock_anthropic.assert_called_once()
        mock_openai.assert_called_once()
        mock_bedrock.assert_called_once()


def test_get_models_list_google_provider():
    """Test that get_models_list returns only Google models when google provider specified."""
    with patch('promptbuilder.llm_client.google_client.GoogleLLMClient.models_list') as mock_google:
        mock_google.return_value = [
            Model(full_model_name="google:gemini-1.5-flash", provider="google", model="gemini-1.5-flash", display_name="Gemini 1.5 Flash"),
            Model(full_model_name="google:gemini-1.5-pro", provider="google", model="gemini-1.5-pro", display_name="Gemini 1.5 Pro")
        ]

        result = get_models_list(provider="google")

        assert len(result) == 2
        assert all(model.provider == "google" for model in result)
        mock_google.assert_called_once()


def test_get_models_list_anthropic_provider():
    """Test that get_models_list returns only Anthropic models when anthropic provider specified."""
    with patch('promptbuilder.llm_client.anthropic_client.AnthropicLLMClient.models_list') as mock_anthropic:
        mock_anthropic.return_value = [
            Model(full_model_name="anthropic:claude-3-opus-20240229", provider="anthropic", model="claude-3-opus-20240229", display_name="Claude 3 Opus"),
            Model(full_model_name="anthropic:claude-3-sonnet-20240229", provider="anthropic", model="claude-3-sonnet-20240229", display_name="Claude 3 Sonnet")
        ]

        result = get_models_list(provider="anthropic")

        assert len(result) == 2
        assert all(model.provider == "anthropic" for model in result)
        mock_anthropic.assert_called_once()


def test_get_models_list_openai_provider():
    """Test that get_models_list returns only OpenAI models when openai provider specified."""
    with patch('promptbuilder.llm_client.openai_client.OpenaiLLMClient.models_list') as mock_openai:
        mock_openai.return_value = [
            Model(full_model_name="openai:gpt-4", provider="openai", model="gpt-4"),
            Model(full_model_name="openai:gpt-3.5-turbo", provider="openai", model="gpt-3.5-turbo")
        ]

        result = get_models_list(provider="openai")

        assert len(result) == 2
        assert all(model.provider == "openai" for model in result)
        mock_openai.assert_called_once()


def test_get_models_list_bedrock_provider():
    """Test that get_models_list returns only Bedrock models when bedrock provider specified."""
    with patch('promptbuilder.llm_client.bedrock_client.BedrockLLMClient.models_list') as mock_bedrock:
        mock_bedrock.return_value = [
            Model(full_model_name="bedrock:arn1", provider="bedrock", model="arn1", display_name="Model 1"),
            Model(full_model_name="bedrock:arn2", provider="bedrock", model="arn2", display_name="Model 2")
        ]

        result = get_models_list(provider="bedrock")

        assert len(result) == 2
        assert all(model.provider == "bedrock" for model in result)
        mock_bedrock.assert_called_once()


def test_get_models_list_invalid_provider():
    """Test that get_models_list returns empty list for invalid provider."""
    result = get_models_list(provider="invalid_provider")

    assert result == []
    assert isinstance(result, list)


def test_get_models_list_empty_responses():
    """Test that get_models_list handles empty responses from providers."""
    with patch('promptbuilder.llm_client.google_client.GoogleLLMClient.models_list') as mock_google, \
         patch('promptbuilder.llm_client.anthropic_client.AnthropicLLMClient.models_list') as mock_anthropic, \
         patch('promptbuilder.llm_client.openai_client.OpenaiLLMClient.models_list') as mock_openai, \
         patch('promptbuilder.llm_client.bedrock_client.BedrockLLMClient.models_list') as mock_bedrock:

        # All providers return empty lists
        mock_google.return_value = []
        mock_anthropic.return_value = []
        mock_openai.return_value = []
        mock_bedrock.return_value = []

        result = get_models_list()

        assert result == []
        assert isinstance(result, list)


def test_model_structure():
    """Test that Model objects have the expected structure."""
    with patch('promptbuilder.llm_client.google_client.GoogleLLMClient.models_list') as mock_google:
        mock_google.return_value = [
            Model(
                full_model_name="google:gemini-1.5-flash",
                provider="google",
                model="gemini-1.5-flash",
                display_name="Gemini 1.5 Flash"
            )
        ]

        result = get_models_list(provider="google")

        assert len(result) == 1
        model = result[0]

        # Check all fields exist
        assert hasattr(model, 'full_model_name')
        assert hasattr(model, 'provider')
        assert hasattr(model, 'model')
        assert hasattr(model, 'display_name')

        # Check field values
        assert model.full_model_name == "google:gemini-1.5-flash"
        assert model.provider == "google"
        assert model.model == "gemini-1.5-flash"
        assert model.display_name == "Gemini 1.5 Flash"


def test_model_without_display_name():
    """Test that Model objects can be created without display_name (optional field)."""
    with patch('promptbuilder.llm_client.openai_client.OpenaiLLMClient.models_list') as mock_openai:
        mock_openai.return_value = [
            Model(
                full_model_name="openai:gpt-4",
                provider="openai",
                model="gpt-4"
            )
        ]

        result = get_models_list(provider="openai")

        assert len(result) == 1
        model = result[0]

        assert model.full_model_name == "openai:gpt-4"
        assert model.provider == "openai"
        assert model.model == "gpt-4"
        assert model.display_name is None
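
Inferred from these tests, the aggregation surface looks like this (a usage sketch, not documented API):

    from promptbuilder.llm_client.main import get_models_list

    all_models = get_models_list()                      # queries every supported provider
    google_models = get_models_list(provider="google")  # or "anthropic", "openai", "bedrock"
    for m in google_models:
        print(m.full_model_name, m.display_name)        # e.g. "google:gemini-1.5-flash"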