promptlayer 1.0.28__tar.gz → 1.0.30__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of promptlayer might be problematic.
- {promptlayer-1.0.28 → promptlayer-1.0.30}/PKG-INFO +2 -1
- {promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/__init__.py +1 -1
- {promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/promptlayer.py +73 -12
- {promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/utils.py +55 -60
- {promptlayer-1.0.28 → promptlayer-1.0.30}/pyproject.toml +2 -1
- {promptlayer-1.0.28 → promptlayer-1.0.30}/LICENSE +0 -0
- {promptlayer-1.0.28 → promptlayer-1.0.30}/README.md +0 -0
- {promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/groups/__init__.py +0 -0
- {promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/groups/groups.py +0 -0
- {promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/promptlayer_base.py +0 -0
- {promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/span_exporter.py +0 -0
- {promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/templates.py +0 -0
- {promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/track/__init__.py +0 -0
- {promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/track/track.py +0 -0
- {promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/types/__init__.py +0 -0
- {promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/types/prompt_template.py +0 -0
- {promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/types/request_log.py +0 -0
{promptlayer-1.0.28 → promptlayer-1.0.30}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: promptlayer
-Version: 1.0.28
+Version: 1.0.30
 Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
 License: Apache-2.0
 Author: Magniv
@@ -16,6 +16,7 @@ Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: ably (>=2.0.6,<3.0.0)
 Requires-Dist: aiohttp (>=3.10.10,<4.0.0)
 Requires-Dist: httpx (>=0.27.2,<0.28.0)
+Requires-Dist: nest-asyncio (>=1.6.0,<2.0.0)
 Requires-Dist: opentelemetry-api (>=1.26.0,<2.0.0)
 Requires-Dist: opentelemetry-sdk (>=1.26.0,<2.0.0)
 Requires-Dist: requests (>=2.31.0,<3.0.0)
{promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/promptlayer.py

@@ -5,6 +5,7 @@ from copy import deepcopy
 from functools import wraps
 from typing import Any, Dict, List, Literal, Optional, Union
 
+import nest_asyncio
 from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.trace import TracerProvider
 from opentelemetry.sdk.trace.export import BatchSpanProcessor
@@ -20,12 +21,12 @@ from promptlayer.utils import (
     anthropic_request,
     anthropic_stream_completion,
     anthropic_stream_message,
+    arun_workflow_request,
     autil_log_request,
     azure_openai_request,
     openai_request,
     openai_stream_chat,
     openai_stream_completion,
-    run_workflow_async,
     stream_response,
     track_request,
     util_log_request,
@@ -173,7 +174,12 @@ class PromptLayer:
 
     @staticmethod
     def _prepare_llm_request_params(
-        *,
+        *,
+        prompt_blueprint,
+        prompt_template,
+        prompt_blueprint_model,
+        model_parameter_overrides,
+        stream,
     ):
         provider = prompt_blueprint_model["provider"]
         kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
@@ -182,6 +188,9 @@ class PromptLayer:
         if provider_base_url := prompt_blueprint.get("provider_base_url"):
             kwargs["base_url"] = provider_base_url["url"]
 
+        if model_parameter_overrides:
+            kwargs.update(model_parameter_overrides)
+
         kwargs["stream"] = stream
         if stream and provider in ["openai", "openai.azure"]:
             kwargs["stream_options"] = {"include_usage": True}
@@ -235,6 +244,7 @@
         prompt_version: Union[int, None] = None,
         prompt_release_label: Union[str, None] = None,
         input_variables: Union[Dict[str, Any], None] = None,
+        model_parameter_overrides: Union[Dict[str, Any], None] = None,
         tags: Union[List[str], None] = None,
         metadata: Union[Dict[str, str], None] = None,
         group_id: Union[int, None] = None,
@@ -255,6 +265,7 @@
             prompt_blueprint=prompt_blueprint,
             prompt_template=prompt_blueprint["prompt_template"],
             prompt_blueprint_model=prompt_blueprint_model,
+            model_parameter_overrides=model_parameter_overrides,
             stream=stream,
         )
 
@@ -343,6 +354,7 @@
         prompt_version: Union[int, None] = None,
         prompt_release_label: Union[str, None] = None,
         input_variables: Union[Dict[str, Any], None] = None,
+        model_parameter_overrides: Union[Dict[str, Any], None] = None,
         tags: Union[List[str], None] = None,
         metadata: Union[Dict[str, str], None] = None,
         group_id: Union[int, None] = None,
@@ -353,6 +365,7 @@
             "prompt_version": prompt_version,
             "prompt_release_label": prompt_release_label,
             "input_variables": input_variables,
+            "model_parameter_overrides": model_parameter_overrides,
             "tags": tags,
             "metadata": metadata,
             "group_id": group_id,
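
Taken together, the hunks above add an optional model_parameter_overrides argument to the run entry points and thread it through to _prepare_llm_request_params, where it is merged over the template's stored llm_kwargs via kwargs.update(model_parameter_overrides). A minimal usage sketch; the constructor call, the prompt_name parameter, and the override keys are illustrative assumptions rather than signatures confirmed by this diff:

from promptlayer import PromptLayer

pl = PromptLayer(api_key="pl_...")  # placeholder API key

# Hypothetical call: the overrides are merged into the template's llm_kwargs
# before the request is sent, so they win over the stored model parameters.
response = pl.run(
    prompt_name="my-prompt",  # assumed parameter name; not visible in these hunks
    input_variables={"question": "What changed in 1.0.30?"},
    model_parameter_overrides={"temperature": 0.2, "max_tokens": 256},
)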
@@ -384,16 +397,39 @@
         return_all_outputs: Optional[bool] = False,
     ) -> Dict[str, Any]:
         try:
-
-
-
-
-
-
-
-
-
-
+            try:
+                # Check if we're inside a running event loop
+                loop = asyncio.get_running_loop()
+            except RuntimeError:
+                loop = None
+
+            if loop and loop.is_running():
+                nest_asyncio.apply()
+                # With nest_asyncio applied, asyncio.run() can be nested in the active loop
+                return asyncio.run(
+                    arun_workflow_request(
+                        workflow_name=workflow_name,
+                        input_variables=input_variables or {},
+                        metadata=metadata,
+                        workflow_label_name=workflow_label_name,
+                        workflow_version_number=workflow_version,
+                        api_key=self.api_key,
+                        return_all_outputs=return_all_outputs,
+                    )
+                )
+            else:
+                # If there's no active event loop, use `asyncio.run()`
+                return asyncio.run(
+                    arun_workflow_request(
+                        workflow_name=workflow_name,
+                        input_variables=input_variables or {},
+                        metadata=metadata,
+                        workflow_label_name=workflow_label_name,
+                        workflow_version_number=workflow_version,
+                        api_key=self.api_key,
+                        return_all_outputs=return_all_outputs,
+                    )
+                )
         except Exception as e:
             raise Exception(f"Error running workflow: {str(e)}")
 
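
The rewritten synchronous run_workflow exists because asyncio.run() raises "RuntimeError: asyncio.run() cannot be called from a running event loop" when a loop is already active, as in Jupyter notebooks or async web handlers. The new code detects that case and patches the loop with nest_asyncio first. The pattern in isolation, as a self-contained sketch with a stand-in coroutine:

import asyncio

import nest_asyncio

async def work() -> str:
    # Stand-in for arun_workflow_request
    return "done"

def run_sync() -> str:
    # Mirrors the 1.0.30 pattern: detect an active loop, patch it if needed,
    # then call asyncio.run() either way.
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        loop = None
    if loop and loop.is_running():
        nest_asyncio.apply()  # permit nested asyncio.run() on the active loop
    return asyncio.run(work())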
@@ -503,6 +539,31 @@ class AsyncPromptLayer:
         self.group = AsyncGroupManager(api_key)
         self.track = AsyncTrackManager(api_key)
 
+    async def run_workflow(
+        self,
+        workflow_name: str,
+        input_variables: Optional[Dict[str, Any]] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        workflow_label_name: Optional[str] = None,
+        workflow_version: Optional[
+            int
+        ] = None,  # This is the version number, not the version ID
+        return_all_outputs: Optional[bool] = False,
+    ) -> Dict[str, Any]:
+        try:
+            result = await arun_workflow_request(
+                workflow_name=workflow_name,
+                input_variables=input_variables or {},
+                metadata=metadata,
+                workflow_label_name=workflow_label_name,
+                workflow_version_number=workflow_version,
+                api_key=self.api_key,
+                return_all_outputs=return_all_outputs,
+            )
+            return result
+        except Exception as e:
+            raise Exception(f"Error running workflow: {str(e)}")
+
     async def log_request(
         self,
         *,
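
For callers that are already async, the new AsyncPromptLayer.run_workflow simply awaits arun_workflow_request, with no event-loop detection needed. A usage sketch, assuming AsyncPromptLayer is importable from the package root (the import path is not shown in this diff) and using placeholder names:

import asyncio

from promptlayer import AsyncPromptLayer  # assumed export path

async def main() -> None:
    pl = AsyncPromptLayer(api_key="pl_...")  # placeholder API key
    result = await pl.run_workflow(
        workflow_name="my-workflow",          # hypothetical workflow
        input_variables={"topic": "testing"},
        return_all_outputs=False,
    )
    print(result)

asyncio.run(main())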
{promptlayer-1.0.28 → promptlayer-1.0.30}/promptlayer/utils.py

@@ -10,10 +10,10 @@ from copy import deepcopy
 from enum import Enum
 from typing import Any, Callable, Dict, Generator, List, Optional, Union
 
-import aiohttp
 import httpx
 import requests
 from ably import AblyRealtime
+from ably.types.message import Message
 from opentelemetry import context, trace
 
 from promptlayer.types import RequestLog
@@ -30,41 +30,16 @@ URL_API_PROMPTLAYER = os.environ.setdefault(
 )
 
 
-def run_workflow_async(
+async def arun_workflow_request(
     *,
     workflow_name: str,
     input_variables: Dict[str, Any],
-    metadata: Optional[Dict[str, str]] = None,
+    metadata: Optional[Dict[str, Any]] = None,
     workflow_label_name: Optional[str] = None,
     workflow_version_number: Optional[int] = None,
     api_key: str,
     return_all_outputs: Optional[bool] = False,
     timeout: Optional[int] = 120,
-) -> Dict[str, Any]:
-    return asyncio.run(
-        run_workflow_request(
-            workflow_name=workflow_name,
-            input_variables=input_variables,
-            metadata=metadata,
-            workflow_label_name=workflow_label_name,
-            workflow_version_number=workflow_version_number,
-            api_key=api_key,
-            return_all_outputs=return_all_outputs,
-            timeout=timeout,
-        )
-    )
-
-
-async def run_workflow_request(
-    *,
-    workflow_name: str,
-    input_variables: Dict[str, Any],
-    metadata: Optional[Dict[str, str]] = None,
-    workflow_label_name: Optional[str] = None,
-    workflow_version_number: Optional[int] = None,
-    api_key: str,
-    return_all_outputs: Optional[bool] = None,
-    timeout: Optional[int] = 120,
 ) -> Dict[str, Any]:
     payload = {
         "input_variables": input_variables,
@@ -78,15 +53,21 @@ async def run_workflow_request(
     headers = {"X-API-KEY": api_key}
 
     try:
-        async with
-
-
-
-
-
-
+        async with httpx.AsyncClient() as client:
+            response = await client.post(url, json=payload, headers=headers)
+            if response.status_code != 201:
+                raise_on_bad_response(
+                    response,
+                    "PromptLayer had the following error while running your workflow",
+                )
+
+            result = response.json()
+            warning = result.get("warning")
+            if warning:
+                print(f"WARNING: {warning}")
+
     except Exception as e:
-        error_message = f"Failed to run workflow: {e}"
+        error_message = f"Failed to run workflow: {str(e)}"
         print(error_message)
         raise Exception(error_message)
 
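
This hunk completes the aiohttp-to-httpx migration for the workflow-run POST (the import aiohttp line was dropped at the top of utils.py) and swaps raise_for_status()-style handling for an explicit status check. The request shape, reduced to a self-contained sketch with a placeholder URL and key:

import asyncio

import httpx

async def post_json(url: str, payload: dict, api_key: str) -> dict:
    # httpx.AsyncClient replaces the old aiohttp session; the expected status
    # is checked explicitly instead of calling response.raise_for_status().
    async with httpx.AsyncClient() as client:
        response = await client.post(
            url, json=payload, headers={"X-API-KEY": api_key}
        )
        if response.status_code != 201:
            raise Exception(f"Unexpected status: {response.status_code}")
        return response.json()

# asyncio.run(post_json("https://api.promptlayer.com/...", {}, "pl_..."))  # placeholder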
@@ -96,12 +77,24 @@ async def run_workflow_request(
 
     channel_name = f"workflow_updates:{execution_id}"
 
-
-
-
-
-
-
+    # Get WebSocket token
+    try:
+        async with httpx.AsyncClient() as client:
+            ws_response = await client.post(
+                f"{URL_API_PROMPTLAYER}/ws-token-request-library",
+                headers=headers,
+                params={"capability": channel_name},
+            )
+            if ws_response.status_code != 201:
+                raise_on_bad_response(
+                    ws_response,
+                    "PromptLayer had the following error while getting WebSocket token",
+                )
+            token_details = ws_response.json()["token_details"]
+    except Exception as e:
+        error_message = f"Failed to get WebSocket token: {e}"
+        print(error_message)
+        raise Exception(error_message)
 
     # Initialize Ably client
     ably_client = AblyRealtime(token=token_details["token"])
@@ -112,7 +105,7 @@ async def run_workflow_request(
     final_output = {}
     message_received_event = asyncio.Event()
 
-    async def message_listener(message):
+    async def message_listener(message: Message):
         if message.name == "set_workflow_node_output":
             data = json.loads(message.data)
             if data.get("status") == "workflow_complete":
@@ -130,7 +123,7 @@ async def run_workflow_request(
         await ably_client.close()
         raise Exception("Workflow execution did not complete properly")
 
-    # Unsubscribe from the channel
+    # Unsubscribe from the channel and close the client
     channel.unsubscribe("set_workflow_node_output", message_listener)
     await ably_client.close()
 
@@ -761,7 +754,9 @@ def warn_on_bad_response(request_response, main_message):
 def raise_on_bad_response(request_response, main_message):
     if hasattr(request_response, "json"):
         try:
-            raise Exception(
+            raise Exception(
+                f"{main_message}: {request_response.json().get('message') or request_response.json().get('error')}"
+            )
         except json.JSONDecodeError:
             raise Exception(f"{main_message}: {request_response}")
     else:
@@ -953,7 +948,11 @@ async def aget_prompt_template(
             headers={"X-API-KEY": api_key},
             json=json_body,
         )
-        response.raise_for_status()
+        if response.status_code != 200:
+            raise_on_bad_response(
+                response,
+                "PromptLayer had the following error while getting your prompt template",
+            )
         warning = response.json().get("warning", None)
         if warning:
             warn_on_bad_response(
@@ -961,10 +960,6 @@ async def aget_prompt_template(
                 "WARNING: While getting your prompt template",
             )
         return response.json()
-    except httpx.HTTPStatusError as e:
-        raise Exception(
-            f"PromptLayer had the following error while getting your prompt template: {e.response.text}"
-        ) from e
     except httpx.RequestError as e:
         raise Exception(
             f"PromptLayer had the following error while getting your prompt template: {str(e)}"
@@ -1015,12 +1010,12 @@ async def apublish_prompt_template(
             raise Exception(
                 f"PromptLayer had the following error while publishing your prompt template: {response.text}"
             )
-        response.raise_for_status()
+        if response.status_code != 201:
+            raise_on_bad_response(
+                response,
+                "PromptLayer had the following error while publishing your prompt template",
+            )
         return response.json()
-    except httpx.HTTPStatusError as e:
-        raise Exception(
-            f"PromptLayer had the following error while publishing your prompt template: {e.response.text}"
-        ) from e
     except httpx.RequestError as e:
         raise Exception(
             f"PromptLayer had the following error while publishing your prompt template: {str(e)}"
@@ -1058,13 +1053,13 @@ async def aget_all_prompt_templates(
             headers={"X-API-KEY": api_key},
             params={"page": page, "per_page": per_page},
         )
-        response.raise_for_status()
+        if response.status_code != 200:
+            raise_on_bad_response(
+                response,
+                "PromptLayer had the following error while getting all your prompt templates",
+            )
         items = response.json().get("items", [])
         return items
-    except httpx.HTTPStatusError as e:
-        raise Exception(
-            f"PromptLayer had the following error while getting all your prompt templates: {e.response.text}"
-        ) from e
     except httpx.RequestError as e:
         raise Exception(
             f"PromptLayer had the following error while getting all your prompt templates: {str(e)}"
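
The last three utils.py hunks share one refactor: response.raise_for_status() paired with an except httpx.HTTPStatusError handler is replaced by an explicit status-code comparison that routes through raise_on_bad_response, so the API's own message or error field reaches the caller instead of a bare HTTP error. The shape of the new check, as a standalone sketch:

import httpx

def check_response(response: httpx.Response, main_message: str, expected: int = 200) -> None:
    # Same shape as the new checks: compare against the expected status code
    # and surface the body's message/error fields in the raised exception.
    if response.status_code != expected:
        try:
            detail = response.json().get("message") or response.json().get("error")
        except ValueError:  # body was not valid JSON
            detail = response.text
        raise Exception(f"{main_message}: {detail}")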
{promptlayer-1.0.28 → promptlayer-1.0.30}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "promptlayer"
-version = "1.0.28"
+version = "1.0.30"
 description = "PromptLayer is a platform for prompt engineering and tracks your LLM requests."
 authors = ["Magniv <hello@magniv.io>"]
 license = "Apache-2.0"
@@ -14,6 +14,7 @@ opentelemetry-sdk = "^1.26.0"
 ably = "^2.0.6"
 aiohttp = "^3.10.10"
 httpx = "^0.27.2"
+nest-asyncio = "^1.6.0"
 
 [tool.poetry.group.dev.dependencies]
 langchain = "^0.0.260"