promptbuilder-0.4.28-py3-none-any.whl → promptbuilder-0.4.29-py3-none-any.whl

promptbuilder/llm_client/litellm_client.py

@@ -0,0 +1,513 @@
+ import json
+ from typing import Any
+ import os
+
+ import litellm
+ from pydantic import BaseModel
+
+ from promptbuilder.llm_client.base_client import BaseLLMClient, BaseLLMClientAsync, ResultType
+ from promptbuilder.llm_client.types import (
+     Response,
+     Content,
+     Candidate,
+     UsageMetadata,
+     Part,
+     FinishReason,
+     ThinkingConfig,
+     Tool,
+     ToolConfig,
+     FunctionCall,
+     Role,
+ )
+ from promptbuilder.llm_client.config import DecoratorConfigs
+ from promptbuilder.prompt_builder import PromptBuilder
+
+
+ class LiteLLMLLMClient(BaseLLMClient):
+     provider: str = ""
+     user_tag: Role = "user"
+     assistant_tag: Role = "model"
+
+     def __init__(
+         self,
+         full_model_name: str,
+         api_key: str | None = None,
+         decorator_configs: DecoratorConfigs | None = None,
+         default_max_tokens: int | None = None,
+         **kwargs,
+     ):
+         # Parse "provider:model"; keep the provider for litellm routing (e.g., "ollama").
+         provider, model = full_model_name.split(":", 1)
+         super().__init__(provider, model, decorator_configs=decorator_configs, default_max_tokens=default_max_tokens)
+         self._api_key = api_key or ""
+
+     @property
+     def api_key(self) -> str:
+         return self._api_key
+
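+     # Map between promptbuilder role tags ("user"/"model") and OpenAI-style chat roles.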
+     def _internal_role(self, role: Role) -> str:
+         return "user" if role == self.user_tag else "assistant"
+
+     def _external_role(self, role: str) -> Role:
+         return self.user_tag if role == "user" else self.assistant_tag
+
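+     # Normalize a tool call from litellm (dict or OpenAI-style object) into a FunctionCall.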
+     @staticmethod
+     def make_function_call(tool_call) -> FunctionCall | None:
+         if tool_call is None:
+             return None
+         if isinstance(tool_call, dict):
+             tool_name = tool_call.get("function", {}).get("name")
+             arguments = tool_call.get("function", {}).get("arguments")
+             tool_call_id = tool_call.get("id")
+         else:
+             # OpenAI-style object
+             tool_name = getattr(getattr(tool_call, "function", None), "name", None)
+             arguments = getattr(getattr(tool_call, "function", None), "arguments", None)
+             tool_call_id = getattr(tool_call, "id", None)
+
+         if isinstance(arguments, str):
+             try:
+                 arguments = json.loads(arguments)
+             except Exception:
+                 pass
+         if not isinstance(arguments, dict):
+             arguments = None
+         return FunctionCall(id=tool_call_id, name=tool_name, args=arguments)
+
+     @staticmethod
+     def make_usage_metadata(usage) -> UsageMetadata:
+         if usage is None:
+             return UsageMetadata()
+         # usage could be dict-like or object
+         is_dict = isinstance(usage, dict)
+         completion_tokens = getattr(usage, "completion_tokens", None) if not is_dict else usage.get("completion_tokens")
+         prompt_tokens = getattr(usage, "prompt_tokens", None) if not is_dict else usage.get("prompt_tokens")
+         total_tokens = getattr(usage, "total_tokens", None) if not is_dict else usage.get("total_tokens")
+         # litellm sometimes returns input_tokens/output_tokens
+         if completion_tokens is None and is_dict:
+             completion_tokens = usage.get("output_tokens")
+         if prompt_tokens is None and is_dict:
+             prompt_tokens = usage.get("input_tokens")
+         if total_tokens is None and prompt_tokens is not None and completion_tokens is not None:
+             total_tokens = prompt_tokens + completion_tokens
+         cached_tokens = None
+         thoughts_tokens = None
+         if is_dict:
+             ptd = usage.get("prompt_tokens_details") or {}
+             if isinstance(ptd, dict):
+                 cached_tokens = ptd.get("cached_tokens")
+             ctd = usage.get("completion_tokens_details") or {}
+             if isinstance(ctd, dict):
+                 thoughts_tokens = ctd.get("reasoning_tokens") or ctd.get("thinking_tokens")
+         return UsageMetadata(
+             candidates_token_count=completion_tokens,
+             prompt_token_count=prompt_tokens,
+             total_token_count=total_tokens,
+             cached_content_token_count=cached_tokens,
+             thoughts_token_count=thoughts_tokens,
+         )
+
+     @staticmethod
+     def _map_finish_reason(reason: Any) -> FinishReason | None:
+         if reason is None:
+             return None
+         # Normalize to string
+         if not isinstance(reason, str):
+             try:
+                 reason = str(reason)
+             except Exception:
+                 return None
+         reason = reason.lower()
+         # Map common OpenAI/LiteLLM reasons to our enum
+         if reason == "stop":
+             return FinishReason.STOP
+         if reason in ("length", "max_tokens"):
+             return FinishReason.MAX_TOKENS
+         if reason in ("content_filter", "safety"):
+             return FinishReason.SAFETY
+         if reason in ("tool_calls", "function_call"):
+             # Model is asking to call tools/functions; not an error and not max tokens
+             return FinishReason.OTHER
+         # Unknown reason
+         return FinishReason.FINISH_REASON_UNSPECIFIED
+
+     def _create(
+         self,
+         messages: list[Content],
+         result_type: ResultType = None,
+         *,
+         thinking_config: ThinkingConfig | None = None,
+         system_message: str | None = None,
+         max_tokens: int | None = None,
+         tools: list[Tool] | None = None,
+         tool_config: ToolConfig = ToolConfig(),
+     ) -> Response:
+         litellm_messages: list[dict[str, str]] = []
+         if system_message is not None:
+             litellm_messages.append({"role": "system", "content": system_message})
+         for message in messages:
+             if message.role == "user":
+                 litellm_messages.append({"role": "user", "content": message.as_str()})
+             elif message.role == "model":
+                 litellm_messages.append({"role": "assistant", "content": message.as_str()})
+
+         # Compose litellm model string as "provider/model" (e.g., "ollama/llama3.1").
+         litellm_model = f"{self.provider}/{self.model}"
+         kwargs: dict[str, Any] = {
+             "model": litellm_model,
+             "messages": litellm_messages,
+         }
+         if self._api_key:
+             kwargs["api_key"] = self._api_key
+         # Allow Ollama base URL via env var
+         if self.provider == "ollama":
+             base_url = os.getenv("OLLAMA_BASE_URL") or os.getenv("LITELLM_OLLAMA_BASE_URL")
+             if base_url:
+                 kwargs["api_base"] = base_url
+
+         if max_tokens is not None:
+             kwargs["max_tokens"] = max_tokens
+
+         if tools is not None:
+             lite_tools = []
+             allowed_function_names = None
+             if tool_config.function_calling_config is not None:
+                 allowed_function_names = tool_config.function_calling_config.allowed_function_names
+             for tool in tools:
+                 for func_decl in tool.function_declarations or []:
+                     if allowed_function_names is None or func_decl.name in allowed_function_names:
+                         parameters = func_decl.parameters
+                         if parameters is not None:
+                             parameters = parameters.model_dump(exclude_none=True)
+                         else:
+                             parameters = {"type": "object", "properties": {}, "required": [], "additionalProperties": False}
+                         lite_tools.append({
+                             "type": "function",
+                             "function": {
+                                 "name": func_decl.name,
+                                 "description": func_decl.description,
+                                 "parameters": parameters,
+                             },
+                         })
+             if lite_tools:
+                 kwargs["tools"] = lite_tools
+                 # tool choice mapping
+                 tool_choice_mode = None
+                 if tool_config.function_calling_config is not None:
+                     tool_choice_mode = tool_config.function_calling_config.mode
+                 if tool_choice_mode == "NONE":
+                     kwargs["tool_choice"] = "none"
+                 elif tool_choice_mode == "ANY":
+                     kwargs["tool_choice"] = "required"
+                 elif tool_choice_mode == "AUTO":
+                     kwargs["tool_choice"] = "auto"
+
+         # LiteLLM does not handle Pydantic parsing server-side; mimic AiSuite approach
+         if result_type is None or result_type == "json":
+             response: Any = litellm.completion(**kwargs)
+
+             parts: list[Part] = []
+             # LiteLLM returns OpenAI-style choices
+             for choice in (getattr(response, "choices", None) or response.get("choices", [])):
+                 message = getattr(choice, "message", None) if hasattr(choice, "message") else choice.get("message")
+                 tool_calls = getattr(message, "tool_calls", None) if hasattr(message, "tool_calls") else (message.get("tool_calls") if isinstance(message, dict) else None)
+                 if tool_calls is not None:
+                     if not isinstance(tool_calls, list):
+                         tool_calls = [tool_calls]
+                     for tool_call in tool_calls:
+                         fc = self.make_function_call(tool_call)
+                         if fc is not None:
+                             parts.append(Part(function_call=fc))
+                 content = getattr(message, "content", None) if hasattr(message, "content") else (message.get("content") if isinstance(message, dict) else None)
+                 if content is not None:
+                     parts.append(Part(text=content))
+
+             usage = getattr(response, "usage", None) if hasattr(response, "usage") else response.get("usage")
+             usage_md = self.make_usage_metadata(usage)
+             first_choice = (getattr(response, "choices", None) or response.get("choices", []))[0] if (getattr(response, "choices", None) or response.get("choices")) else None
+             role_str = getattr(getattr(first_choice, "message", None), "role", None) if first_choice is not None else None
+             if role_str is None and isinstance(first_choice, dict):
+                 msg0 = first_choice.get("message")
+                 if isinstance(msg0, dict):
+                     role_str = msg0.get("role")
+             # finish_reason from first choice
+             finish_reason_val = None
+             if first_choice is not None:
+                 if isinstance(first_choice, dict):
+                     finish_reason_val = first_choice.get("finish_reason")
+                 else:
+                     finish_reason_val = getattr(first_choice, "finish_reason", None)
+             mapped_finish_reason = LiteLLMLLMClient._map_finish_reason(finish_reason_val)
+
+             content_parts: list[Part | Any] = list(parts)
+             return Response(
+                 candidates=[Candidate(
+                     content=Content(
+                         parts=content_parts,  # type: ignore[arg-type]
+                         role=self._external_role(role_str) if role_str else None,
+                     ),
+                     finish_reason=mapped_finish_reason,
+                 )],
+                 usage_metadata=usage_md,
+             )
+         elif isinstance(result_type, type(BaseModel)):
+             message_with_structure = PromptBuilder().set_structured_output(result_type).build().render()
+             kwargs["messages"].append({"role": "user", "content": message_with_structure})
+             response: Any = litellm.completion(**kwargs)
+
+             parts: list[Part] = []
+             text = ""
+             for choice in (getattr(response, "choices", None) or response.get("choices", [])):
+                 message = getattr(choice, "message", None) if hasattr(choice, "message") else choice.get("message")
+                 tool_calls = getattr(message, "tool_calls", None) if hasattr(message, "tool_calls") else (message.get("tool_calls") if isinstance(message, dict) else None)
+                 if tool_calls is not None:
+                     if not isinstance(tool_calls, list):
+                         tool_calls = [tool_calls]
+                     for tool_call in tool_calls:
+                         fc = self.make_function_call(tool_call)
+                         if fc is not None:
+                             parts.append(Part(function_call=fc))
+                 content = getattr(message, "content", None) if hasattr(message, "content") else (message.get("content") if isinstance(message, dict) else None)
+                 if content is not None:
+                     text += content + "\n"
+                     parts.append(Part(text=content))
+
+             parsed = BaseLLMClient.as_json(text)
+             parsed_pydantic = result_type.model_validate(parsed)
+
+             usage = getattr(response, "usage", None) if hasattr(response, "usage") else response.get("usage")
+             usage_md = self.make_usage_metadata(usage)
+             first_choice = (getattr(response, "choices", None) or response.get("choices", []))[0] if (getattr(response, "choices", None) or response.get("choices")) else None
+             role_str = getattr(getattr(first_choice, "message", None), "role", None) if first_choice is not None else None
+             if role_str is None and isinstance(first_choice, dict):
+                 msg0 = first_choice.get("message")
+                 if isinstance(msg0, dict):
+                     role_str = msg0.get("role")
+             finish_reason_val = None
+             if first_choice is not None:
+                 if isinstance(first_choice, dict):
+                     finish_reason_val = first_choice.get("finish_reason")
+                 else:
+                     finish_reason_val = getattr(first_choice, "finish_reason", None)
+             mapped_finish_reason = LiteLLMLLMClient._map_finish_reason(finish_reason_val)
+
+             content_parts2: list[Part | Any] = list(parts)
+             return Response(
+                 candidates=[Candidate(
+                     content=Content(
+                         parts=content_parts2,  # type: ignore[arg-type]
+                         role=self._external_role(role_str) if role_str else None,
+                     ),
+                     finish_reason=mapped_finish_reason,
+                 )],
+                 usage_metadata=usage_md,
+                 parsed=parsed_pydantic,
+             )
+         else:
+             raise ValueError(f"Unsupported result_type: {result_type}. Supported types are: None, 'json', or a Pydantic model.")
+
+
+ class LiteLLMLLMClientAsync(BaseLLMClientAsync):
+     provider: str = ""
+     user_tag: Role = "user"
+     assistant_tag: Role = "model"
+
+     def __init__(
+         self,
+         full_model_name: str,
+         api_key: str | None = None,
+         decorator_configs: DecoratorConfigs | None = None,
+         default_max_tokens: int | None = None,
+         **kwargs,
+     ):
+         provider, model = full_model_name.split(":", 1)
+         # Allow None and rely on env vars
+         super().__init__(provider, model, decorator_configs=decorator_configs, default_max_tokens=default_max_tokens)
+         self._api_key = api_key or ""
+
+     @property
+     def api_key(self) -> str:
+         return self._api_key
+
+     def _internal_role(self, role: Role) -> str:
+         return "user" if role == self.user_tag else "assistant"
+
+     def _external_role(self, role: str) -> Role:
+         return self.user_tag if role == "user" else self.assistant_tag
+
+     @staticmethod
+     def make_function_call(tool_call) -> FunctionCall | None:
+         return LiteLLMLLMClient.make_function_call(tool_call)
+
+     @staticmethod
+     def make_usage_metadata(usage) -> UsageMetadata:
+         return LiteLLMLLMClient.make_usage_metadata(usage)
+
+     async def _create(
+         self,
+         messages: list[Content],
+         result_type: ResultType = None,
+         *,
+         thinking_config: ThinkingConfig | None = None,
+         system_message: str | None = None,
+         max_tokens: int | None = None,
+         tools: list[Tool] | None = None,
+         tool_config: ToolConfig = ToolConfig(),
+     ) -> Response:
+         litellm_messages: list[dict[str, str]] = []
+         if system_message is not None:
+             litellm_messages.append({"role": "system", "content": system_message})
+         for message in messages:
+             if message.role == "user":
+                 litellm_messages.append({"role": "user", "content": message.as_str()})
+             elif message.role == "model":
+                 litellm_messages.append({"role": "assistant", "content": message.as_str()})
+
+         litellm_model = f"{self.provider}/{self.model}"
+         kwargs: dict[str, Any] = {
+             "model": litellm_model,
+             "messages": litellm_messages,
+         }
+         if self._api_key:
+             kwargs["api_key"] = self._api_key
+         if self.provider == "ollama":
+             base_url = os.getenv("OLLAMA_BASE_URL") or os.getenv("LITELLM_OLLAMA_BASE_URL")
+             if base_url:
+                 kwargs["api_base"] = base_url
+
+         if max_tokens is not None:
+             kwargs["max_tokens"] = max_tokens
+
+         if tools is not None:
+             lite_tools = []
+             allowed_function_names = None
+             if tool_config.function_calling_config is not None:
+                 allowed_function_names = tool_config.function_calling_config.allowed_function_names
+             for tool in tools:
+                 for func_decl in tool.function_declarations or []:
+                     if allowed_function_names is None or func_decl.name in allowed_function_names:
+                         parameters = func_decl.parameters
+                         if parameters is not None:
+                             parameters = parameters.model_dump(exclude_none=True)
+                         else:
+                             parameters = {"type": "object", "properties": {}, "required": [], "additionalProperties": False}
+                         lite_tools.append({
+                             "type": "function",
+                             "function": {
+                                 "name": func_decl.name,
+                                 "description": func_decl.description,
+                                 "parameters": parameters,
+                             },
+                         })
+             if lite_tools:
+                 kwargs["tools"] = lite_tools
+                 tool_choice_mode = None
+                 if tool_config.function_calling_config is not None:
+                     tool_choice_mode = tool_config.function_calling_config.mode
+                 if tool_choice_mode == "NONE":
+                     kwargs["tool_choice"] = "none"
+                 elif tool_choice_mode == "ANY":
+                     kwargs["tool_choice"] = "required"
+                 elif tool_choice_mode == "AUTO":
+                     kwargs["tool_choice"] = "auto"
+
+         if result_type is None or result_type == "json":
+             response: Any = await litellm.acompletion(**kwargs)
+
+             parts: list[Part] = []
+             for choice in (getattr(response, "choices", None) or response.get("choices", [])):
+                 message = getattr(choice, "message", None) if hasattr(choice, "message") else choice.get("message")
+                 tool_calls = getattr(message, "tool_calls", None) if hasattr(message, "tool_calls") else (message.get("tool_calls") if isinstance(message, dict) else None)
+                 if tool_calls is not None:
+                     if not isinstance(tool_calls, list):
+                         tool_calls = [tool_calls]
+                     for tool_call in tool_calls:
+                         fc = self.make_function_call(tool_call)
+                         if fc is not None:
+                             parts.append(Part(function_call=fc))
+                 content = getattr(message, "content", None) if hasattr(message, "content") else (message.get("content") if isinstance(message, dict) else None)
+                 if content is not None:
+                     parts.append(Part(text=content))
+
+             usage = getattr(response, "usage", None) if hasattr(response, "usage") else response.get("usage")
+             usage_md = self.make_usage_metadata(usage)
+             first_choice = (getattr(response, "choices", None) or response.get("choices", []))[0] if (getattr(response, "choices", None) or response.get("choices")) else None
+             role_str = getattr(getattr(first_choice, "message", None), "role", None) if first_choice is not None else None
+             if role_str is None and isinstance(first_choice, dict):
+                 msg0 = first_choice.get("message")
+                 if isinstance(msg0, dict):
+                     role_str = msg0.get("role")
+             finish_reason_val = None
+             if first_choice is not None:
+                 if isinstance(first_choice, dict):
+                     finish_reason_val = first_choice.get("finish_reason")
+                 else:
+                     finish_reason_val = getattr(first_choice, "finish_reason", None)
+             mapped_finish_reason = LiteLLMLLMClient._map_finish_reason(finish_reason_val)
+
+             content_parts3: list[Part | Any] = list(parts)
+             return Response(
+                 candidates=[Candidate(
+                     content=Content(
+                         parts=content_parts3,  # type: ignore[arg-type]
+                         role=self._external_role(role_str) if role_str else None,
+                     ),
+                     finish_reason=mapped_finish_reason,
+                 )],
+                 usage_metadata=usage_md,
+             )
+         elif isinstance(result_type, type(BaseModel)):
+             message_with_structure = PromptBuilder().set_structured_output(result_type).build().render()
+             kwargs["messages"].append({"role": "user", "content": message_with_structure})
+             response: Any = await litellm.acompletion(**kwargs)
+
+             parts: list[Part] = []
+             text = ""
+             for choice in (getattr(response, "choices", None) or response.get("choices", [])):
+                 message = getattr(choice, "message", None) if hasattr(choice, "message") else choice.get("message")
+                 tool_calls = getattr(message, "tool_calls", None) if hasattr(message, "tool_calls") else (message.get("tool_calls") if isinstance(message, dict) else None)
+                 if tool_calls is not None:
+                     if not isinstance(tool_calls, list):
+                         tool_calls = [tool_calls]
+                     for tool_call in tool_calls:
+                         fc = self.make_function_call(tool_call)
+                         if fc is not None:
+                             parts.append(Part(function_call=fc))
+                 content = getattr(message, "content", None) if hasattr(message, "content") else (message.get("content") if isinstance(message, dict) else None)
+                 if content is not None:
+                     text += content + "\n"
+                     parts.append(Part(text=content))
+
+             parsed = BaseLLMClient.as_json(text)
+             parsed_pydantic = result_type.model_validate(parsed)
+
+             usage = getattr(response, "usage", None) if hasattr(response, "usage") else response.get("usage")
+             usage_md = self.make_usage_metadata(usage)
+             first_choice = (getattr(response, "choices", None) or response.get("choices", []))[0] if (getattr(response, "choices", None) or response.get("choices")) else None
+             role_str = getattr(getattr(first_choice, "message", None), "role", None) if first_choice is not None else None
+             if role_str is None and isinstance(first_choice, dict):
+                 msg0 = first_choice.get("message")
+                 if isinstance(msg0, dict):
+                     role_str = msg0.get("role")
+             finish_reason_val = None
+             if first_choice is not None:
+                 if isinstance(first_choice, dict):
+                     finish_reason_val = first_choice.get("finish_reason")
+                 else:
+                     finish_reason_val = getattr(first_choice, "finish_reason", None)
+             mapped_finish_reason = LiteLLMLLMClient._map_finish_reason(finish_reason_val)
+
+             content_parts4: list[Part | Any] = list(parts)
+             return Response(
+                 candidates=[Candidate(
+                     content=Content(
+                         parts=content_parts4,  # type: ignore[arg-type]
+                         role=self._external_role(role_str) if role_str else None,
+                     ),
+                     finish_reason=mapped_finish_reason,
+                 )],
+                 usage_metadata=usage_md,
+                 parsed=parsed_pydantic,
+             )
+         else:
+             raise ValueError(f"Unsupported result_type: {result_type}. Supported types are: None, 'json', or a Pydantic model.")
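
Based on the constructor and model-string handling above, a minimal usage sketch of the new client; the model name "ollama:llama3.1" and the base URL are illustrative assumptions, not values from the diff:

    import os

    from promptbuilder.llm_client.litellm_client import LiteLLMLLMClient

    # Hypothetical local endpoint: the client reads OLLAMA_BASE_URL (or
    # LITELLM_OLLAMA_BASE_URL) when the provider is "ollama".
    os.environ["OLLAMA_BASE_URL"] = "http://localhost:11434"

    # "ollama:llama3.1" is split on the first ":" into provider "ollama" and
    # model "llama3.1"; requests go to litellm as model "ollama/llama3.1".
    client = LiteLLMLLMClient("ollama:llama3.1")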

promptbuilder/llm_client/main.py

@@ -10,6 +10,7 @@ from promptbuilder.llm_client.anthropic_client import AnthropicLLMClient, Anthro
  from promptbuilder.llm_client.openai_client import OpenaiLLMClient, OpenaiLLMClientAsync
  from promptbuilder.llm_client.bedrock_client import BedrockLLMClient, BedrockLLMClientAsync
  from promptbuilder.llm_client.aisuite_client import AiSuiteLLMClient, AiSuiteLLMClientAsync
+ from promptbuilder.llm_client.litellm_client import LiteLLMLLMClient, LiteLLMLLMClientAsync
 
 
 
@@ -42,10 +43,7 @@ def get_client(
          client_class = provider_to_client_class[provider]
          client = client_class(model, api_key, **kwargs)
      else:
-         if api_key is None:
-             raise ValueError(f"You should directly provide api_key for this provider: {provider}")
-         else:
-             client = AiSuiteLLMClient(full_model_name, api_key, **kwargs)
+         client = LiteLLMLLMClient(full_model_name, api_key, **kwargs)
 
      if (full_model_name, client.api_key) in _memory:
          client = _memory[(full_model_name, client.api_key)]
@@ -86,10 +84,7 @@ def get_async_client(
          client_class = provider_to_client_class[provider]
          client = client_class(model, api_key, **kwargs)
      else:
-         if api_key is None:
-             raise ValueError(f"You should directly provide api_key for this provider: {provider}")
-         else:
-             client = AiSuiteLLMClientAsync(full_model_name, api_key, **kwargs)
+         client = LiteLLMLLMClientAsync(full_model_name, api_key, **kwargs)
 
      if (full_model_name, client.api_key) in _memory_async:
          client = _memory_async[(full_model_name, client.api_key)]
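
Net effect of the main.py changes: for providers without a dedicated client class, get_client and get_async_client now route to the LiteLLM clients instead of raising when no api_key is given. A minimal sketch, assuming api_key defaults to None (which the removed check implies) and the illustrative "ollama:llama3.1" model name:

    from promptbuilder.llm_client.main import get_client

    # 0.4.28: raised ValueError unless api_key was passed explicitly.
    # 0.4.29: falls through to LiteLLMLLMClient.
    client = get_client("ollama:llama3.1")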

promptbuilder-{0.4.28 → 0.4.29}.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: promptbuilder
- Version: 0.4.28
+ Version: 0.4.29
  Summary: Library for building prompts for LLMs
  Home-page: https://github.com/kapulkin/promptbuilder
  Author: Kapulkin Stanislav

promptbuilder-{0.4.28 → 0.4.29}.dist-info/RECORD

@@ -14,13 +14,14 @@ promptbuilder/llm_client/bedrock_client.py,sha256=rJMzVV7x_sNJ1nVVqWU1sU-Pq7xlxF
  promptbuilder/llm_client/config.py,sha256=exQEm35wp7lK5SfXNpN5H9VZEb2LVa4pyZ-cxGt1U-U,1124
  promptbuilder/llm_client/exceptions.py,sha256=t-X7r_a8B1jNu8eEavde1jXu5dz97yV3IG4YHOtgh0Y,4836
  promptbuilder/llm_client/google_client.py,sha256=GzKd_EeJY_GEoZrI6I3ZUAk1PRDBBkdJfEPUKgmlUXM,11782
+ promptbuilder/llm_client/litellm_client.py,sha256=WfObiNTzgu4CFPUNeN4TmNBC6o_dPmB5P9DI5k3vcRg,25284
  promptbuilder/llm_client/logfire_decorators.py,sha256=un_QnIekypOEcqTZ5v1y9pwijGnF95xwnwKO5rFSHVY,9667
- promptbuilder/llm_client/main.py,sha256=5r_MhKVTD4cS90AHR89JJRKiWYBk35Y3JvhvmOxkYHc,8110
+ promptbuilder/llm_client/main.py,sha256=m-9jM2IYMFy6aZBUmPb52wpFlIK0H1aRj293oFmxLjU,7907
  promptbuilder/llm_client/openai_client.py,sha256=lT0RCiixJBoCtzUbL_0J5NQ5G8KGONzK3dQ73_NgL78,24789
  promptbuilder/llm_client/types.py,sha256=kgbg5FRzvZwu98y1OhAZJDneXBNPnsFZueQCr9HXIY4,8063
  promptbuilder/llm_client/utils.py,sha256=79lvSppjrrItHB5MIozbp_5Oq7TsOK4Qzt9Ae3XMLFw,7624
- promptbuilder-0.4.28.dist-info/licenses/LICENSE,sha256=fqXmInzgsvEOIaKSBgcrwKyYCGYF0MKErJ0YivtODcc,1096
- promptbuilder-0.4.28.dist-info/METADATA,sha256=O94B_otCBTHQHr5G_-VqiC5a_xj8rCSydUSzG0GABIo,3729
- promptbuilder-0.4.28.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- promptbuilder-0.4.28.dist-info/top_level.txt,sha256=UBVcYn4UgrPy3O3fmmnPEU_kieuplBMgheetIMei4EI,14
- promptbuilder-0.4.28.dist-info/RECORD,,
+ promptbuilder-0.4.29.dist-info/licenses/LICENSE,sha256=fqXmInzgsvEOIaKSBgcrwKyYCGYF0MKErJ0YivtODcc,1096
+ promptbuilder-0.4.29.dist-info/METADATA,sha256=rfxQaTbrKl5se6wrV6b2QfMDhtSrxG7WbkG06fGtNCo,3729
+ promptbuilder-0.4.29.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ promptbuilder-0.4.29.dist-info/top_level.txt,sha256=UBVcYn4UgrPy3O3fmmnPEU_kieuplBMgheetIMei4EI,14
+ promptbuilder-0.4.29.dist-info/RECORD,,