promptbuilder 0.4.20.tar.gz → 0.4.21.tar.gz
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- {promptbuilder-0.4.20/promptbuilder.egg-info → promptbuilder-0.4.21}/PKG-INFO +1 -1
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/llm_client/anthropic_client.py +6 -2
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/llm_client/bedrock_client.py +19 -7
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/llm_client/google_client.py +6 -2
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/llm_client/main.py +2 -8
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/llm_client/openai_client.py +6 -2
- {promptbuilder-0.4.20 → promptbuilder-0.4.21/promptbuilder.egg-info}/PKG-INFO +1 -1
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/setup.py +1 -1
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/LICENSE +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/MANIFEST.in +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/Readme.md +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/__init__.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/agent/__init__.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/agent/agent.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/agent/context.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/agent/tool.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/agent/utils.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/embeddings.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/llm_client/__init__.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/llm_client/aisuite_client.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/llm_client/base_client.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/llm_client/config.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/llm_client/exceptions.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/llm_client/logfire_decorators.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/llm_client/types.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/llm_client/utils.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder/prompt_builder.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder.egg-info/SOURCES.txt +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder.egg-info/dependency_links.txt +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder.egg-info/requires.txt +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/promptbuilder.egg-info/top_level.txt +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/pyproject.toml +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/setup.cfg +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/tests/test_llm_client.py +0 -0
- {promptbuilder-0.4.20 → promptbuilder-0.4.21}/tests/test_llm_client_async.py +0 -0
promptbuilder/llm_client/anthropic_client.py

@@ -136,13 +136,15 @@ class AnthropicLLMClient(BaseLLMClient):
     def __init__(
         self,
         model: str,
-        api_key: str =
+        api_key: str | None = None,
         decorator_configs: DecoratorConfigs | None = None,
         default_thinking_config: ThinkingConfig | None = None,
         default_max_tokens: int | None = None,
         default_max_tokens_strategy: DefaultMaxTokensStrategy = AnthropicDefaultMaxTokensStrategy(),
         **kwargs,
     ):
+        if api_key is None:
+            api_key = os.getenv("ANTHROPIC_API_KEY")
         if api_key is None or not isinstance(api_key, str):
             raise ValueError("To create an anthropic llm client you need to either set the environment variable ANTHROPIC_API_KEY or pass the api_key in string format")
         super().__init__(AnthropicLLMClient.PROVIDER, model, decorator_configs=decorator_configs, default_thinking_config=default_thinking_config, default_max_tokens=default_max_tokens)
@@ -422,13 +424,15 @@ class AnthropicLLMClientAsync(BaseLLMClientAsync):
     def __init__(
         self,
         model: str,
-        api_key: str =
+        api_key: str | None = None,
         decorator_configs: DecoratorConfigs | None = None,
         default_thinking_config: ThinkingConfig | None = None,
         default_max_tokens: int | None = None,
         default_max_tokens_strategy: DefaultMaxTokensStrategy = AnthropicDefaultMaxTokensStrategy(),
         **kwargs,
     ):
+        if api_key is None:
+            api_key = os.getenv("ANTHROPIC_API_KEY")
         if api_key is None or not isinstance(api_key, str):
             raise ValueError("To create an anthropic llm client you need to either set the environment variable ANTHROPIC_API_KEY or pass the api_key in string format")
         super().__init__(AnthropicLLMClientAsync.PROVIDER, model, decorator_configs=decorator_configs, default_thinking_config=default_thinking_config, default_max_tokens=default_max_tokens)
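Both the sync and async Anthropic clients now resolve the key from the environment when api_key is omitted. A minimal usage sketch, assuming ANTHROPIC_API_KEY is exported and using a placeholder model name (not taken from this diff):

import os

from promptbuilder.llm_client.anthropic_client import AnthropicLLMClient

# ANTHROPIC_API_KEY must be set; otherwise the constructor raises the
# ValueError shown in the hunks above.
assert os.getenv("ANTHROPIC_API_KEY"), "export ANTHROPIC_API_KEY first"

# In 0.4.20 api_key had to be supplied explicitly; in 0.4.21 it defaults
# to None and is looked up via os.getenv inside __init__.
client = AnthropicLLMClient(model="claude-3-5-sonnet-latest")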
promptbuilder/llm_client/bedrock_client.py

@@ -22,9 +22,9 @@ P = ParamSpec("P")
 class BedrockApiKey(BaseModel, CustomApiKey):
     model_config = ConfigDict(frozen=True)
 
-    aws_access_key_id: str
-    aws_secret_access_key: str
-    aws_region: str
+    aws_access_key_id: str
+    aws_secret_access_key: str
+    aws_region: str
 
 
 @inherited_decorator
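The removed and re-added field lines above are identical once the viewer's formatting is stripped, so this hunk appears to be a whitespace-only touch-up; the model itself is unchanged. Because BedrockApiKey is declared with ConfigDict(frozen=True), instances are immutable. A self-contained sketch of that behavior, assuming pydantic v2 and using placeholder values (the real class also mixes in CustomApiKey):

from pydantic import BaseModel, ConfigDict, ValidationError

# Simplified stand-in for promptbuilder's BedrockApiKey.
class BedrockApiKey(BaseModel):
    model_config = ConfigDict(frozen=True)

    aws_access_key_id: str
    aws_secret_access_key: str
    aws_region: str

key = BedrockApiKey(
    aws_access_key_id="AKIA...",       # placeholder credentials
    aws_secret_access_key="secret",
    aws_region="us-east-1",
)

try:
    key.aws_region = "eu-west-1"       # frozen=True forbids mutation
except ValidationError as err:
    print(err)                         # pydantic rejects the assignment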
@@ -78,13 +78,19 @@ class BedrockLLMClient(BaseLLMClient):
     def __init__(
         self,
         model: str,
-        api_key: BedrockApiKey =
+        api_key: BedrockApiKey | None = None,
         decorator_configs: DecoratorConfigs | None = None,
         default_thinking_config: ThinkingConfig | None = None,
         default_max_tokens: int | None = None,
         **kwargs,
     ):
-        if api_key is None
+        if api_key is None:
+            api_key = BedrockApiKey(
+                aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
+                aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
+                aws_region=os.getenv("AWS_DEFAULT_REGION", "us-east-1"),
+            )
+        if not isinstance(api_key, BedrockApiKey):
             raise ValueError(
                 "To create a bedrock llm client you need to either set the environment variables "
                 "AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and optional AWS_DEFAULT_REGION or pass the api_key as BedrockApiKey instance"
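With this change the Bedrock client assembles its BedrockApiKey from the standard AWS environment variables when api_key is omitted, with the region defaulting to us-east-1. A minimal sketch, assuming both credential variables are exported and using a placeholder Bedrock model id:

import os

from promptbuilder.llm_client.bedrock_client import BedrockLLMClient

# Both variables must be set; AWS_DEFAULT_REGION is optional.
assert os.getenv("AWS_ACCESS_KEY_ID") and os.getenv("AWS_SECRET_ACCESS_KEY")

# Placeholder model id, not taken from this diff.
client = BedrockLLMClient(model="anthropic.claude-3-5-sonnet-20240620-v1:0")

One subtlety worth noting: if either credential variable is unset, os.getenv returns None and pydantic rejects it when BedrockApiKey is constructed, so the failure surfaces as a ValidationError rather than the ValueError in the hunk above.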
@@ -363,13 +369,19 @@ class BedrockLLMClientAsync(BaseLLMClientAsync):
     def __init__(
         self,
         model: str,
-        api_key: BedrockApiKey =
+        api_key: BedrockApiKey | None = None,
         decorator_configs: DecoratorConfigs | None = None,
         default_thinking_config: ThinkingConfig | None = None,
         default_max_tokens: int | None = None,
         **kwargs,
     ):
-        if api_key is None
+        if api_key is None:
+            api_key = BedrockApiKey(
+                aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
+                aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
+                aws_region=os.getenv("AWS_DEFAULT_REGION", "us-east-1"),
+            )
+        if not isinstance(api_key, BedrockApiKey):
             raise ValueError(
                 "To create a bedrock llm client you need to either set the environment variables "
                 "AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and optional AWS_DEFAULT_REGION or pass the api_key as BedrockApiKey instance"
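Passing the key explicitly bypasses the environment lookup entirely, for both the sync and async Bedrock clients. A sketch with placeholder credentials and model id:

from promptbuilder.llm_client.bedrock_client import BedrockApiKey, BedrockLLMClientAsync

key = BedrockApiKey(
    aws_access_key_id="AKIA...",       # placeholder credentials
    aws_secret_access_key="secret",
    aws_region="eu-central-1",
)

# An explicit BedrockApiKey skips the os.getenv fallback added in 0.4.21.
client = BedrockLLMClientAsync(
    model="anthropic.claude-3-haiku-20240307-v1:0",  # placeholder model id
    api_key=key,
)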
promptbuilder/llm_client/google_client.py

@@ -45,12 +45,14 @@ class GoogleLLMClient(BaseLLMClient):
     def __init__(
         self,
         model: str,
-        api_key: str =
+        api_key: str | None = None,
         decorator_configs: DecoratorConfigs | None = None,
         default_thinking_config: ThinkingConfig | None = None,
         default_max_tokens: int | None = None,
         **kwargs,
     ):
+        if api_key is None:
+            api_key = os.getenv("GOOGLE_API_KEY")
         if api_key is None or not isinstance(api_key, str):
             raise ValueError("To create a google llm client you need to either set the environment variable GOOGLE_API_KEY or pass the api_key in string format")
         super().__init__(GoogleLLMClient.PROVIDER, model, decorator_configs=decorator_configs, default_thinking_config=default_thinking_config, default_max_tokens=default_max_tokens)
@@ -219,12 +221,14 @@ class GoogleLLMClientAsync(BaseLLMClientAsync):
     def __init__(
        self,
         model: str,
-        api_key: str =
+        api_key: str | None = None,
         decorator_configs: DecoratorConfigs | None = None,
         default_thinking_config: ThinkingConfig | None = None,
         default_max_tokens: int | None = None,
         **kwargs,
     ):
+        if api_key is None:
+            api_key = os.getenv("GOOGLE_API_KEY")
         if api_key is None or not isinstance(api_key, str):
             raise ValueError("To create a google llm client you need to either set the environment variable GOOGLE_API_KEY or pass the api_key in string format")
         super().__init__(GoogleLLMClientAsync.PROVIDER, model, decorator_configs=decorator_configs, default_thinking_config=default_thinking_config, default_max_tokens=default_max_tokens)
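An explicitly passed key always takes precedence, since the os.getenv fallback only runs when api_key is None. A sketch with placeholder values (a real key is still required for actual requests):

import os

from promptbuilder.llm_client.google_client import GoogleLLMClient

os.environ["GOOGLE_API_KEY"] = "env-key"           # placeholder

# The explicit argument is used as-is; the environment variable is ignored
# because the fallback only triggers on api_key=None.
client = GoogleLLMClient(model="gemini-1.5-flash", api_key="explicit-key")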
promptbuilder/llm_client/main.py

@@ -40,10 +40,7 @@ def get_client(
     provider, model = full_model_name.split(":", 1)
     if provider in provider_to_client_class:
         client_class = provider_to_client_class[provider]
-        if api_key is None:
-            client = client_class(model, **kwargs)
-        else:
-            client = client_class(model, api_key, **kwargs)
+        client = client_class(model, api_key, **kwargs)
     else:
         if api_key is None:
             raise ValueError(f"You should directly provide api_key for this provider: {provider}")
@@ -87,10 +84,7 @@ def get_async_client(
     provider, model = full_model_name.split(":", 1)
     if provider in provider_to_client_class:
         client_class = provider_to_client_class[provider]
-        if api_key is None:
-            client = client_class(model, **kwargs)
-        else:
-            client = client_class(model, api_key, **kwargs)
+        client = client_class(model, api_key, **kwargs)
     else:
         if api_key is None:
             raise ValueError(f"You should directly provide api_key for this provider: {provider}")
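Because every built-in client now accepts api_key=None and resolves the key itself, the factory can forward api_key unconditionally instead of branching on it. A usage sketch, assuming get_client is importable from promptbuilder.llm_client.main, that api_key defaults to None in its signature, and that "openai" is a registered provider:

from promptbuilder.llm_client.main import get_client

# full_model_name is split on the first colon into provider and model;
# with no api_key the chosen client falls back to its environment
# variable (e.g. OPENAI_API_KEY for the openai provider).
client = get_client("openai:gpt-4o-mini")   # placeholder model name

# Providers outside provider_to_client_class still require an explicit key:
#   ValueError: You should directly provide api_key for this provider: ...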
promptbuilder/llm_client/openai_client.py

@@ -69,12 +69,14 @@ class OpenaiLLMClient(BaseLLMClient):
     def __init__(
         self,
         model: str,
-        api_key: str =
+        api_key: str | None = None,
         decorator_configs: DecoratorConfigs | None = None,
         default_thinking_config: ThinkingConfig | None = None,
         default_max_tokens: int | None = None,
         **kwargs,
     ):
+        if api_key is None:
+            api_key = os.getenv("OPENAI_API_KEY")
         if api_key is None or not isinstance(api_key, str):
             raise ValueError("To create an openai llm client you need to either set the environment variable OPENAI_API_KEY or pass the api_key in string format")
         super().__init__(OpenaiLLMClient.PROVIDER, model, decorator_configs=decorator_configs, default_thinking_config=default_thinking_config, default_max_tokens=default_max_tokens)
@@ -356,12 +358,14 @@ class OpenaiLLMClientAsync(BaseLLMClientAsync):
     def __init__(
         self,
         model: str,
-        api_key: str =
+        api_key: str | None = None,
         decorator_configs: DecoratorConfigs | None = None,
         default_thinking_config: ThinkingConfig | None = None,
         default_max_tokens: int | None = None,
         **kwargs,
     ):
+        if api_key is None:
+            api_key = os.getenv("OPENAI_API_KEY")
         if api_key is None or not isinstance(api_key, str):
             raise ValueError("To create an openai llm client you need to either set the environment variable OPENAI_API_KEY or pass the api_key in string format")
         super().__init__(OpenaiLLMClientAsync.PROVIDER, model, decorator_configs=decorator_configs, default_thinking_config=default_thinking_config, default_max_tokens=default_max_tokens)
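If neither OPENAI_API_KEY nor an explicit key is available, both OpenAI clients fail fast with the ValueError shown above. A minimal sketch of that failure mode:

import os

from promptbuilder.llm_client.openai_client import OpenaiLLMClient

os.environ.pop("OPENAI_API_KEY", None)     # simulate a missing key

try:
    OpenaiLLMClient(model="gpt-4o-mini")   # placeholder model name
except ValueError as err:
    print(err)  # "To create an openai llm client you need to either set ..."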