promptbuilder 0.4.23__py3-none-any.whl → 0.4.24__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
- promptbuilder/llm_client/aisuite_client.py +2 -2
- promptbuilder/llm_client/anthropic_client.py +2 -2
- promptbuilder/llm_client/base_client.py +131 -41
- promptbuilder/llm_client/bedrock_client.py +2 -2
- promptbuilder/llm_client/google_client.py +2 -2
- promptbuilder/llm_client/openai_client.py +2 -2
- promptbuilder/llm_client/types.py +1 -0
- {promptbuilder-0.4.23.dist-info → promptbuilder-0.4.24.dist-info}/METADATA +1 -1
- {promptbuilder-0.4.23.dist-info → promptbuilder-0.4.24.dist-info}/RECORD +12 -12
- {promptbuilder-0.4.23.dist-info → promptbuilder-0.4.24.dist-info}/WHEEL +0 -0
- {promptbuilder-0.4.23.dist-info → promptbuilder-0.4.24.dist-info}/licenses/LICENSE +0 -0
- {promptbuilder-0.4.23.dist-info → promptbuilder-0.4.24.dist-info}/top_level.txt +0 -0
promptbuilder/llm_client/aisuite_client.py

@@ -63,7 +63,7 @@ class AiSuiteLLMClient(BaseLLMClient):
             total_token_count=usage.total_tokens if hasattr(usage, "total_tokens") else usage["total_tokens"],
         )

-    def create(
+    def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,

@@ -222,7 +222,7 @@ class AiSuiteLLMClientAsync(BaseLLMClientAsync):
             total_token_count=usage.total_tokens if hasattr(usage, "total_tokens") else usage["total_tokens"],
         )

-    async def create(
+    async def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
promptbuilder/llm_client/anthropic_client.py

@@ -192,7 +192,7 @@ class AnthropicLLMClient(BaseLLMClient):
         return anthropic_messages

     @_error_handler
-    def create(
+    def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,

@@ -445,7 +445,7 @@ class AnthropicLLMClientAsync(BaseLLMClientAsync):
         return self._api_key

     @_error_handler_async
-    async def create(
+    async def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
promptbuilder/llm_client/base_client.py

@@ -77,7 +77,6 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
     @logfire_decorators.create
     @utils.retry_cls
     @utils.rpm_limit_cls
-    @abstractmethod
     def create(
         self,
         messages: list[Content],

@@ -88,9 +87,57 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool] | None = None,
         tool_config: ToolConfig = ToolConfig(),
+        autocomplete: bool = False
+    ) -> Response:
+        if autocomplete and (result_type == "tools" or isinstance(result_type, type)):
+            raise ValueError("autocompletion is not supported with 'tools' or pydantic model result_type")
+
+        if max_tokens is None:
+            max_tokens = self.default_max_tokens
+
+        response = self._create(
+            messages=messages,
+            result_type=result_type,
+            thinking_config=thinking_config,
+            system_message=system_message,
+            max_tokens=max_tokens,
+            tools=tools,
+            tool_config=tool_config,
+        )
+
+        total_count = BaseLLMClient._response_out_tokens(response)
+
+        while autocomplete and response.candidates and response.candidates[0].finish_reason == FinishReason.MAX_TOKENS:
+            BaseLLMClient._append_generated_part(messages, response)
+
+            response = self._create(
+                messages=messages,
+                result_type=result_type,
+                thinking_config=thinking_config,
+                system_message=system_message,
+                max_tokens=max_tokens,
+                tools=tools,
+                tool_config=tool_config,
+            )
+            total_count += BaseLLMClient._response_out_tokens(response)
+            if max_tokens is not None and total_count >= max_tokens:
+                break
+        return response
+
+    @abstractmethod
+    def _create(
+        self,
+        messages: list[Content],
+        result_type: ResultType = None,
+        *,
+        thinking_config: ThinkingConfig | None = None,
+        system_message: str | None = None,
+        max_tokens: int | None = None,
+        tools: list[Tool] | None = None,
+        tool_config: ToolConfig = ToolConfig(),
     ) -> Response:
         pass
-
+
     @overload
     def create_value(
         self,
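The hunks above replace the abstract create() with a concrete wrapper that delegates to a new abstract _create() hook. With autocomplete=True, the wrapper re-invokes _create() while the first candidate finishes with MAX_TOKENS, appending each partial generation back onto messages and stopping once the accumulated output tokens (counted by the _response_out_tokens helper added further down) reach max_tokens. Below is a minimal, self-contained sketch of that continuation loop; FakeClient, FakeResponse, and out_tokens are toy stand-ins for illustration, not promptbuilder's actual API.

    from dataclasses import dataclass
    from enum import Enum

    class FinishReason(Enum):
        STOP = "STOP"
        MAX_TOKENS = "MAX_TOKENS"

    @dataclass
    class FakeResponse:
        text: str
        finish_reason: FinishReason
        out_tokens: int

    class FakeClient:
        # Toy _create(): emits at most `chunk` characters per call before
        # reporting MAX_TOKENS, forcing the wrapper to ask for a continuation.
        def __init__(self, full_text: str, chunk: int):
            self.full_text = full_text
            self.chunk = chunk

        def _create(self, prefix: str) -> FakeResponse:
            remaining = self.full_text[len(prefix):]
            piece = remaining[:self.chunk]
            reason = FinishReason.STOP if piece == remaining else FinishReason.MAX_TOKENS
            return FakeResponse(text=piece, finish_reason=reason, out_tokens=len(piece))

    def create(client: FakeClient, max_tokens: int, autocomplete: bool = False) -> str:
        prefix = ""
        response = client._create(prefix)
        total = response.out_tokens
        # Continue only while the model was cut off, never after a clean STOP.
        while autocomplete and response.finish_reason == FinishReason.MAX_TOKENS:
            prefix += response.text  # feed the partial generation back in
            response = client._create(prefix)
            total += response.out_tokens
            if total >= max_tokens:  # respect the overall output budget
                break
        return prefix + response.text

    print(create(FakeClient("a reply that needs several continuations", chunk=10),
                 max_tokens=100, autocomplete=True))

Note that the loop continues only on MAX_TOKENS; a clean STOP, an empty candidate list, or an exhausted token budget all end the exchange with the response assembled so far.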
@@ -102,6 +149,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> str: ...
     @overload
     def create_value(

@@ -114,6 +162,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> Json: ...
     @overload
     def create_value(

@@ -126,6 +175,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> PydanticStructure: ...
     @overload
     def create_value(

@@ -138,6 +188,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool],
         tool_choice_mode: Literal["ANY"],
+        autocomplete: bool = False,
     ) -> list[FunctionCall]: ...

     def create_value(
@@ -177,21 +228,9 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
             max_tokens=max_tokens,
             tools=tools,
             tool_config=ToolConfig(function_calling_config=FunctionCallingConfig(mode=tool_choice_mode)),
+            autocomplete=autocomplete,
         )

-        while autocomplete and response.candidates and response.candidates[0].finish_reason in [FinishReason.STOP, FinishReason.MAX_TOKENS]:
-            BaseLLMClient._append_generated_part(messages, response)
-
-            response = self.create(
-                messages=messages,
-                result_type=result_type,
-                thinking_config=thinking_config,
-                system_message=system_message,
-                max_tokens=max_tokens,
-                tools=tools,
-                tool_config=ToolConfig(function_calling_config=FunctionCallingConfig(mode=tool_choice_mode)),
-            )
-
         if result_type is None:
             return response.text
         else:
@@ -206,14 +245,14 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):

         text_parts = [
             part for part in response.candidates[0].content.parts if part.text is not None and not part.thought
-        ] if response.candidates[0].content
+        ] if response.candidates[0].content.parts else None
         if text_parts is not None and len(text_parts) > 0:
             response_text = "".join(part.text for part in text_parts)
             is_thought = False
         else:
             thought_parts = [
                 part for part in response.candidates[0].content.parts if part.text and part.thought
-            ] if response.candidates[0].content
+            ] if response.candidates[0].content.parts else None
             if thought_parts is not None and len(thought_parts) > 0:
                 response_text = "".join(part.text for part in thought_parts)
                 is_thought = True
@@ -231,6 +270,10 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         else:
             messages.append(Content(parts=[Part(text=response_text, thought=is_thought)], role="model"))

+    @staticmethod
+    def _response_out_tokens(response: Response):
+        return 0 if not response.usage_metadata else (response.usage_metadata.candidates_token_count or 0) + (response.usage_metadata.thoughts_token_count or 0)
+
     @logfire_decorators.create_stream
     @utils.retry_cls
     @utils.rpm_limit_cls
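The _response_out_tokens helper added here counts output-side spend as visible candidate tokens plus hidden reasoning tokens, treating missing usage metadata as zero. A standalone restatement of the same logic (field names follow the UsageMetadata model in types.py):

    from typing import Optional

    from pydantic import BaseModel

    class UsageMetadata(BaseModel):
        candidates_token_count: Optional[int] = None
        thoughts_token_count: Optional[int] = None

    def response_out_tokens(usage: Optional[UsageMetadata]) -> int:
        # No metadata at all counts as zero; None fields are coerced to 0.
        if not usage:
            return 0
        return (usage.candidates_token_count or 0) + (usage.thoughts_token_count or 0)

    assert response_out_tokens(None) == 0
    assert response_out_tokens(UsageMetadata(candidates_token_count=120)) == 120
    assert response_out_tokens(UsageMetadata(candidates_token_count=120, thoughts_token_count=30)) == 150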
@@ -255,6 +298,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> str: ...
     @overload
     def from_text(

@@ -267,6 +311,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> Json: ...
     @overload
     def from_text(

@@ -279,6 +324,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> PydanticStructure: ...
     @overload
     def from_text(

@@ -291,6 +337,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool],
         tool_choice_mode: Literal["ANY"],
+        autocomplete: bool = False,
     ) -> list[FunctionCall]: ...

     def from_text(

@@ -303,6 +350,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool] | None = None,
         tool_choice_mode: Literal["ANY", "NONE"] = "NONE",
+        autocomplete: bool = False,
     ):
         return self.create_value(
             messages=[Content(parts=[Part(text=prompt)], role="user")],

@@ -312,6 +360,7 @@ class BaseLLMClient(ABC, utils.InheritDecoratorsMixin):
             max_tokens=max_tokens,
             tools=tools,
             tool_choice_mode=tool_choice_mode,
+            autocomplete=autocomplete,
         )

@@ -360,7 +409,6 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
     @logfire_decorators.create_async
     @utils.retry_cls_async
     @utils.rpm_limit_cls_async
-    @abstractmethod
     async def create(
         self,
         messages: list[Content],

@@ -371,9 +419,57 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool] | None = None,
         tool_config: ToolConfig = ToolConfig(),
+        autocomplete: bool = False,
+    ) -> Response:
+        if autocomplete and (result_type == "tools" or isinstance(result_type, type)):
+            raise ValueError("autocompletion is not supported with 'tools' or pydantic model result_type")
+
+        if max_tokens is None:
+            max_tokens = self.default_max_tokens
+
+        response = await self._create(
+            messages=messages,
+            result_type=result_type,
+            thinking_config=thinking_config,
+            system_message=system_message,
+            max_tokens=max_tokens,
+            tools=tools,
+            tool_config=tool_config,
+        )
+
+        total_count = BaseLLMClient._response_out_tokens(response)
+
+        while autocomplete and response.candidates and response.candidates[0].finish_reason == FinishReason.MAX_TOKENS:
+            BaseLLMClient._append_generated_part(messages, response)
+
+            response = await self._create(
+                messages=messages,
+                result_type=result_type,
+                thinking_config=thinking_config,
+                system_message=system_message,
+                max_tokens=max_tokens,
+                tools=tools,
+                tool_config=tool_config,
+            )
+            total_count += BaseLLMClient._response_out_tokens(response)
+            if max_tokens is not None and total_count >= max_tokens:
+                break
+        return response
+
+    @abstractmethod
+    async def _create(
+        self,
+        messages: list[Content],
+        result_type: ResultType = None,
+        *,
+        thinking_config: ThinkingConfig | None = None,
+        system_message: str | None = None,
+        max_tokens: int | None = None,
+        tools: list[Tool] | None = None,
+        tool_config: ToolConfig = ToolConfig(),
     ) -> Response:
         pass
-
+
     @overload
     async def create_value(
         self,
@@ -385,6 +481,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> str: ...
     @overload
     async def create_value(

@@ -397,6 +494,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> Json: ...
     @overload
     async def create_value(

@@ -409,6 +507,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> PydanticStructure: ...
     @overload
     async def create_value(

@@ -421,6 +520,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool],
         tool_choice_mode: Literal["ANY"],
+        autocomplete: bool = False,
     ) -> list[FunctionCall]: ...

     async def create_value(
@@ -436,7 +536,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         autocomplete: bool = False,
     ):
         if result_type == "tools":
-            response = await self.create(
+            response = await self._create(
                 messages=messages,
                 result_type=None,
                 thinking_config=thinking_config,

@@ -451,7 +551,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
                 if part.function_call is not None:
                     functions.append(part.function_call)
             return functions
-
+
         response = await self.create(
             messages=messages,
             result_type=result_type,

@@ -460,24 +560,8 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
             max_tokens=max_tokens,
             tools=tools,
             tool_config=ToolConfig(function_calling_config=FunctionCallingConfig(mode=tool_choice_mode)),
+            autocomplete=autocomplete
         )
-
-        if max_tokens is None:
-            max_tokens = self.default_max_tokens
-
-        while autocomplete and response.candidates and response.candidates[0].finish_reason not in [FinishReason.STOP, FinishReason.MAX_TOKENS]:
-            BaseLLMClient._append_generated_part(messages, response)
-
-            response = await self.create(
-                messages=messages,
-                result_type=result_type,
-                thinking_config=thinking_config,
-                system_message=system_message,
-                max_tokens=max_tokens,
-                tools=tools,
-                tool_config=ToolConfig(function_calling_config=FunctionCallingConfig(mode=tool_choice_mode)),
-            )
-
         if result_type is None:
             return response.text
         else:
@@ -509,6 +593,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> str: ...
     @overload
     async def from_text(

@@ -521,6 +606,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> Json: ...
     @overload
     async def from_text(

@@ -533,6 +619,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: None = None,
         tool_choice_mode: Literal["NONE"] = "NONE",
+        autocomplete: bool = False,
     ) -> PydanticStructure: ...
     @overload
     async def from_text(

@@ -545,6 +632,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool],
         tool_choice_mode: Literal["ANY"],
+        autocomplete: bool = False,
     ) -> list[FunctionCall]: ...

     async def from_text(

@@ -557,6 +645,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
         max_tokens: int | None = None,
         tools: list[Tool] | None = None,
         tool_choice_mode: Literal["ANY", "NONE"] = "NONE",
+        autocomplete: bool = False,
     ):
         return await self.create_value(
             messages=[Content(parts=[Part(text=prompt)], role="user")],

@@ -566,6 +655,7 @@ class BaseLLMClientAsync(ABC, utils.InheritDecoratorsMixin):
             max_tokens=max_tokens,
             tools=tools,
             tool_choice_mode=tool_choice_mode,
+            autocomplete=autocomplete,
         )

@@ -586,7 +676,7 @@ class CachedLLMClient(BaseLLMClient):
         self.llm_client = llm_client
         self.cache_dir = cache_dir

-    def create(self, messages: list[Content], **kwargs) -> Response:
+    def _create(self, messages: list[Content], **kwargs) -> Response:
         response, messages_dump, cache_path = CachedLLMClient.create_cached(self.llm_client, self.cache_dir, messages, **kwargs)
         if response is not None:
             return response

@@ -635,8 +725,8 @@ class CachedLLMClientAsync(BaseLLMClientAsync):
         self.provider = llm_client.provider
         self.llm_client = llm_client
         self.cache_dir = cache_dir
-
-    async def create(self, messages: list[Content], **kwargs) -> Response:
+
+    async def _create(self, messages: list[Content], **kwargs) -> Response:
         response, messages_dump, cache_path = CachedLLMClient.create_cached(self.llm_client, self.cache_dir, messages, **kwargs)
         if response is not None:
             return response
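Every provider client, including the cached wrappers above, gets the same mechanical rename: it now overrides the abstract _create() hook, while callers keep using the inherited create() wrapper that layers retries, rate limiting, logging, and autocompletion on top. A minimal sketch of this template-method pattern, with hypothetical names:

    from abc import ABC, abstractmethod

    class BaseClient(ABC):
        # Public entry point: shared behavior lives here once.
        def create(self, prompt: str) -> str:
            response = self._create(prompt)
            # ... retries, rate limits, autocomplete continuations, etc.
            return response

        @abstractmethod
        def _create(self, prompt: str) -> str: ...

    class EchoClient(BaseClient):
        # Providers override only the hook, not the public method.
        def _create(self, prompt: str) -> str:
            return f"echo: {prompt}"

    print(EchoClient().create("hi"))  # -> echo: hi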
promptbuilder/llm_client/bedrock_client.py

@@ -103,7 +103,7 @@ class BedrockLLMClient(BaseLLMClient):
         return self._api_key

     @_error_handler
-    def create(
+    def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,

@@ -399,7 +399,7 @@ class BedrockLLMClientAsync(BaseLLMClientAsync):
         return self._api_key

     @_error_handler_async
-    async def create(
+    async def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
promptbuilder/llm_client/google_client.py

@@ -87,7 +87,7 @@ class GoogleLLMClient(BaseLLMClient):
         return new_messages

     @_error_handler
-    def create(
+    def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,

@@ -240,7 +240,7 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
         return self._api_key

     @_error_handler_async
-    async def create(
+    async def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
promptbuilder/llm_client/openai_client.py

@@ -142,7 +142,7 @@ class OpenaiLLMClient(BaseLLMClient):
         return openai_thinking_config

     @_error_handler
-    def create(
+    def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,

@@ -377,7 +377,7 @@ class OpenaiLLMClientAsync(BaseLLMClientAsync):
         return self._api_key

     @_error_handler_async
-    async def create(
+    async def _create(
         self,
         messages: list[Content],
         result_type: ResultType = None,
promptbuilder/llm_client/types.py

@@ -143,6 +143,7 @@ class UsageMetadata(BaseModel):
     cached_content_token_count: Optional[int] = None
     candidates_token_count: Optional[int] = None
     prompt_token_count: Optional[int] = None
+    thoughts_token_count: Optional[int] = None
     total_token_count: Optional[int] = None

 class ThinkingConfig(BaseModel):
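The new thoughts_token_count field lets providers report reasoning tokens separately from visible candidate tokens; base_client.py's _response_out_tokens sums both when enforcing the autocomplete budget. A small illustration with made-up numbers:

    from typing import Optional

    from pydantic import BaseModel

    class UsageMetadata(BaseModel):
        cached_content_token_count: Optional[int] = None
        candidates_token_count: Optional[int] = None
        prompt_token_count: Optional[int] = None
        thoughts_token_count: Optional[int] = None  # added in 0.4.24
        total_token_count: Optional[int] = None

    usage = UsageMetadata(
        prompt_token_count=250,
        candidates_token_count=120,  # visible output
        thoughts_token_count=480,    # hidden reasoning output
        total_token_count=850,
    )

    # Output-side spend is visible text plus reasoning, matching _response_out_tokens:
    assert (usage.candidates_token_count or 0) + (usage.thoughts_token_count or 0) == 600
    assert usage.total_token_count == usage.prompt_token_count + 600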
promptbuilder-0.4.24.dist-info/RECORD

@@ -7,20 +7,20 @@ promptbuilder/agent/context.py,sha256=CVw715vFrhfvddQmRNy4A1U87GsZyIKj9Xu4SCidbc
 promptbuilder/agent/tool.py,sha256=VDbIHK3_Q62Ei7hwLF7nIgHq-PTMKnv1NSjHpDYkUZE,2651
 promptbuilder/agent/utils.py,sha256=vTkphKw04v_QDIJtoB2JKK0RGY6iI1t_0LbmuStunzI,356
 promptbuilder/llm_client/__init__.py,sha256=wJ33cnRtZX_YPsbcGxEu3SEZMOhPX7-fHI59MEPUe7I,517
-promptbuilder/llm_client/aisuite_client.py,sha256=
-promptbuilder/llm_client/anthropic_client.py,sha256=
-promptbuilder/llm_client/base_client.py,sha256=
-promptbuilder/llm_client/bedrock_client.py,sha256=
+promptbuilder/llm_client/aisuite_client.py,sha256=_TfB1xv73kIn4n8ulV3bj8bHOVm4DOD5uIdX-pbYoXY,15563
+promptbuilder/llm_client/anthropic_client.py,sha256=HSIAZVOQoi3hinjhBVGqpt91k0x38xj6EUsPSUrlAA0,28076
+promptbuilder/llm_client/base_client.py,sha256=0U449lHLjl3llyjQlB4hkMGOWl8cE-BtdT7L14AtIco,27835
+promptbuilder/llm_client/bedrock_client.py,sha256=rJMzVV7x_sNJ1nVVqWU1sU-Pq7xlxFxIa_hTD2wtM1Y,27943
 promptbuilder/llm_client/config.py,sha256=exQEm35wp7lK5SfXNpN5H9VZEb2LVa4pyZ-cxGt1U-U,1124
 promptbuilder/llm_client/exceptions.py,sha256=t-X7r_a8B1jNu8eEavde1jXu5dz97yV3IG4YHOtgh0Y,4836
-promptbuilder/llm_client/google_client.py,sha256=
+promptbuilder/llm_client/google_client.py,sha256=GzKd_EeJY_GEoZrI6I3ZUAk1PRDBBkdJfEPUKgmlUXM,11782
 promptbuilder/llm_client/logfire_decorators.py,sha256=un_QnIekypOEcqTZ5v1y9pwijGnF95xwnwKO5rFSHVY,9667
 promptbuilder/llm_client/main.py,sha256=5r_MhKVTD4cS90AHR89JJRKiWYBk35Y3JvhvmOxkYHc,8110
-promptbuilder/llm_client/openai_client.py,sha256=
-promptbuilder/llm_client/types.py,sha256=
+promptbuilder/llm_client/openai_client.py,sha256=lT0RCiixJBoCtzUbL_0J5NQ5G8KGONzK3dQ73_NgL78,24789
+promptbuilder/llm_client/types.py,sha256=kgbg5FRzvZwu98y1OhAZJDneXBNPnsFZueQCr9HXIY4,8063
 promptbuilder/llm_client/utils.py,sha256=79lvSppjrrItHB5MIozbp_5Oq7TsOK4Qzt9Ae3XMLFw,7624
-promptbuilder-0.4.23.dist-info/licenses/LICENSE,sha256=
-promptbuilder-0.4.23.dist-info/METADATA,sha256=
-promptbuilder-0.4.23.dist-info/WHEEL,sha256=
-promptbuilder-0.4.23.dist-info/top_level.txt,sha256=
-promptbuilder-0.4.23.dist-info/RECORD,,
+promptbuilder-0.4.24.dist-info/licenses/LICENSE,sha256=fqXmInzgsvEOIaKSBgcrwKyYCGYF0MKErJ0YivtODcc,1096
+promptbuilder-0.4.24.dist-info/METADATA,sha256=pHr-63JQFZe_KY1u-uDkq41wu4FnBgyxGHZMAuRddfk,3729
+promptbuilder-0.4.24.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+promptbuilder-0.4.24.dist-info/top_level.txt,sha256=UBVcYn4UgrPy3O3fmmnPEU_kieuplBMgheetIMei4EI,14
+promptbuilder-0.4.24.dist-info/RECORD,,
promptbuilder-0.4.24.dist-info/WHEEL: file without changes
promptbuilder-0.4.24.dist-info/licenses/LICENSE: file without changes
promptbuilder-0.4.24.dist-info/top_level.txt: file without changes