promptlayer 1.0.27__py3-none-any.whl → 1.0.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- promptlayer/__init__.py +1 -1
- promptlayer/promptlayer.py +60 -11
- promptlayer/utils.py +56 -61
- {promptlayer-1.0.27.dist-info → promptlayer-1.0.29.dist-info}/METADATA +2 -1
- {promptlayer-1.0.27.dist-info → promptlayer-1.0.29.dist-info}/RECORD +7 -7
- {promptlayer-1.0.27.dist-info → promptlayer-1.0.29.dist-info}/LICENSE +0 -0
- {promptlayer-1.0.27.dist-info → promptlayer-1.0.29.dist-info}/WHEEL +0 -0
promptlayer/__init__.py
CHANGED
promptlayer/promptlayer.py
CHANGED
@@ -5,6 +5,7 @@ from copy import deepcopy
 from functools import wraps
 from typing import Any, Dict, List, Literal, Optional, Union
 
+import nest_asyncio
 from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.trace import TracerProvider
 from opentelemetry.sdk.trace.export import BatchSpanProcessor
@@ -20,12 +21,12 @@ from promptlayer.utils import (
     anthropic_request,
     anthropic_stream_completion,
     anthropic_stream_message,
+    arun_workflow_request,
     autil_log_request,
     azure_openai_request,
     openai_request,
     openai_stream_chat,
     openai_stream_completion,
-    run_workflow_async,
     stream_response,
     track_request,
     util_log_request,
@@ -384,16 +385,39 @@ class PromptLayer:
         return_all_outputs: Optional[bool] = False,
     ) -> Dict[str, Any]:
         try:
-
-
-
-
-
-
-
-
-
-
+            try:
+                # Check if we're inside a running event loop
+                loop = asyncio.get_running_loop()
+            except RuntimeError:
+                loop = None
+
+            if loop and loop.is_running():
+                nest_asyncio.apply()
+                # If there's an active event loop, use `await` directly
+                return asyncio.run(
+                    arun_workflow_request(
+                        workflow_name=workflow_name,
+                        input_variables=input_variables or {},
+                        metadata=metadata,
+                        workflow_label_name=workflow_label_name,
+                        workflow_version_number=workflow_version,
+                        api_key=self.api_key,
+                        return_all_outputs=return_all_outputs,
+                    )
+                )
+            else:
+                # If there's no active event loop, use `asyncio.run()`
+                return asyncio.run(
+                    arun_workflow_request(
+                        workflow_name=workflow_name,
+                        input_variables=input_variables or {},
+                        metadata=metadata,
+                        workflow_label_name=workflow_label_name,
+                        workflow_version_number=workflow_version,
+                        api_key=self.api_key,
+                        return_all_outputs=return_all_outputs,
+                    )
+                )
         except Exception as e:
             raise Exception(f"Error running workflow: {str(e)}")
 
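Note: both branches above end up calling asyncio.run(); the nest_asyncio.apply() call is what lets asyncio.run() re-enter a loop that is already running (for example inside a Jupyter notebook). A minimal sketch of the same detection pattern, with a hypothetical do_work coroutine standing in for arun_workflow_request:

import asyncio

import nest_asyncio


async def do_work() -> str:
    # Hypothetical coroutine standing in for arun_workflow_request.
    await asyncio.sleep(0)
    return "done"


def run_sync() -> str:
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        loop = None

    if loop and loop.is_running():
        # Inside a live loop (e.g. Jupyter): patch it so asyncio.run() can re-enter.
        nest_asyncio.apply()

    return asyncio.run(do_work())


print(run_sync())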
@@ -503,6 +527,31 @@ class AsyncPromptLayer:
         self.group = AsyncGroupManager(api_key)
         self.track = AsyncTrackManager(api_key)
 
+    async def run_workflow(
+        self,
+        workflow_name: str,
+        input_variables: Optional[Dict[str, Any]] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        workflow_label_name: Optional[str] = None,
+        workflow_version: Optional[
+            int
+        ] = None,  # This is the version number, not the version ID
+        return_all_outputs: Optional[bool] = False,
+    ) -> Dict[str, Any]:
+        try:
+            result = await arun_workflow_request(
+                workflow_name=workflow_name,
+                input_variables=input_variables or {},
+                metadata=metadata,
+                workflow_label_name=workflow_label_name,
+                workflow_version_number=workflow_version,
+                api_key=self.api_key,
+                return_all_outputs=return_all_outputs,
+            )
+            return result
+        except Exception as e:
+            raise Exception(f"Error running workflow: {str(e)}")
+
     async def log_request(
         self,
         *,
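With this addition the async client mirrors the sync API. A hypothetical usage sketch, assuming AsyncPromptLayer is exported from the package root and accepts an api_key; the key and workflow name below are placeholders:

import asyncio

from promptlayer import AsyncPromptLayer


async def main() -> None:
    pl = AsyncPromptLayer(api_key="pl_...")
    result = await pl.run_workflow(
        workflow_name="my-workflow",
        input_variables={"topic": "tracing"},
        return_all_outputs=False,
    )
    print(result)


asyncio.run(main())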
promptlayer/utils.py
CHANGED
@@ -10,10 +10,10 @@ from copy import deepcopy
 from enum import Enum
 from typing import Any, Callable, Dict, Generator, List, Optional, Union
 
-import aiohttp
 import httpx
 import requests
 from ably import AblyRealtime
+from ably.types.message import Message
 from opentelemetry import context, trace
 
 from promptlayer.types import RequestLog
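This hunk swaps aiohttp for httpx as the async HTTP client in utils.py (aiohttp is still declared as a dependency in METADATA). For orientation, a sketch of the same POST written against both libraries; the URL and payload are placeholders:

import aiohttp
import httpx


async def post_with_aiohttp(url: str, payload: dict) -> dict:
    # The removed style: aiohttp's nested async context managers.
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json=payload) as response:
            return await response.json()


async def post_with_httpx(url: str, payload: dict) -> dict:
    # The new style: one client context, synchronous .json() on the response.
    async with httpx.AsyncClient() as client:
        response = await client.post(url, json=payload)
        return response.json()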
@@ -30,41 +30,16 @@ URL_API_PROMPTLAYER = os.environ.setdefault(
 )
 
 
-def run_workflow_async(
+async def arun_workflow_request(
     *,
     workflow_name: str,
     input_variables: Dict[str, Any],
-    metadata: Optional[Dict[str, str]] = None,
+    metadata: Optional[Dict[str, Any]] = None,
     workflow_label_name: Optional[str] = None,
     workflow_version_number: Optional[int] = None,
     api_key: str,
     return_all_outputs: Optional[bool] = False,
     timeout: Optional[int] = 120,
-) -> Dict[str, Any]:
-    return asyncio.run(
-        run_workflow_request(
-            workflow_name=workflow_name,
-            input_variables=input_variables,
-            metadata=metadata,
-            workflow_label_name=workflow_label_name,
-            workflow_version_number=workflow_version_number,
-            api_key=api_key,
-            return_all_outputs=return_all_outputs,
-            timeout=timeout,
-        )
-    )
-
-
-async def run_workflow_request(
-    *,
-    workflow_name: str,
-    input_variables: Dict[str, Any],
-    metadata: Optional[Dict[str, str]] = None,
-    workflow_label_name: Optional[str] = None,
-    workflow_version_number: Optional[int] = None,
-    api_key: str,
-    return_all_outputs: Optional[bool] = None,
-    timeout: Optional[int] = 120,
 ) -> Dict[str, Any]:
     payload = {
         "input_variables": input_variables,
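The old pair (a sync run_workflow_async wrapper around an inner run_workflow_request coroutine) collapses into a single keyword-only coroutine. A hypothetical direct call, with a placeholder API key and workflow name:

import asyncio

from promptlayer.utils import arun_workflow_request


async def main() -> None:
    # All arguments are keyword-only; only these three are required.
    result = await arun_workflow_request(
        workflow_name="my-workflow",
        input_variables={"topic": "tracing"},
        api_key="pl_...",
    )
    print(result)


asyncio.run(main())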
@@ -78,15 +53,21 @@ async def run_workflow_request(
     headers = {"X-API-KEY": api_key}
 
     try:
-        async with
-
-
-
-
-
-
+        async with httpx.AsyncClient() as client:
+            response = await client.post(url, json=payload, headers=headers)
+            if response.status_code != 201:
+                raise_on_bad_response(
+                    response,
+                    "PromptLayer had the following error while running your workflow",
+                )
+
+            result = response.json()
+            warning = result.get("warning")
+            if warning:
+                print(f"WARNING: {warning}")
+
     except Exception as e:
-        error_message = f"Failed to run workflow: {e}"
+        error_message = f"Failed to run workflow: {str(e)}"
         print(error_message)
         raise Exception(error_message)
@@ -96,12 +77,24 @@ async def run_workflow_request(
 
     channel_name = f"workflow_updates:{execution_id}"
 
-
-
-
-
-
-
+    # Get WebSocket token
+    try:
+        async with httpx.AsyncClient() as client:
+            ws_response = await client.post(
+                f"{URL_API_PROMPTLAYER}/ws-token-request-library",
+                headers=headers,
+                params={"capability": channel_name},
+            )
+            if ws_response.status_code != 201:
+                raise_on_bad_response(
+                    ws_response,
+                    "PromptLayer had the following error while getting WebSocket token",
+                )
+            token_details = ws_response.json()["token_details"]
+    except Exception as e:
+        error_message = f"Failed to get WebSocket token: {e}"
+        print(error_message)
+        raise Exception(error_message)
 
     # Initialize Ably client
     ably_client = AblyRealtime(token=token_details["token"])
@@ -112,7 +105,7 @@ async def run_workflow_request(
     final_output = {}
     message_received_event = asyncio.Event()
 
-    async def message_listener(message):
+    async def message_listener(message: Message):
         if message.name == "set_workflow_node_output":
             data = json.loads(message.data)
             if data.get("status") == "workflow_complete":
@@ -130,7 +123,7 @@ async def run_workflow_request(
         await ably_client.close()
         raise Exception("Workflow execution did not complete properly")
 
-    # Unsubscribe from the channel
+    # Unsubscribe from the channel and close the client
     channel.unsubscribe("set_workflow_node_output", message_listener)
     await ably_client.close()
 
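The listener sets an asyncio.Event when a workflow_complete message arrives, and the caller awaits that event with a timeout before unsubscribing. A condensed, self-contained sketch of that wait-for-message pattern, with the Ably channel simulated by a timer:

import asyncio


async def demo(timeout: float = 1.0) -> dict:
    final_output: dict = {}
    message_received_event = asyncio.Event()

    async def message_listener(data: dict) -> None:
        # Mirrors the real listener: record output and signal completion.
        if data.get("status") == "workflow_complete":
            final_output.update(data)
            message_received_event.set()

    loop = asyncio.get_running_loop()
    # Simulate Ably delivering a message shortly after subscribing.
    loop.call_later(
        0.1,
        lambda: asyncio.ensure_future(
            message_listener({"status": "workflow_complete", "final_output": 42})
        ),
    )

    try:
        await asyncio.wait_for(message_received_event.wait(), timeout=timeout)
    except asyncio.TimeoutError:
        raise Exception("Workflow execution did not complete properly")
    return final_output


print(asyncio.run(demo()))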
@@ -761,7 +754,9 @@ def warn_on_bad_response(request_response, main_message):
 def raise_on_bad_response(request_response, main_message):
     if hasattr(request_response, "json"):
         try:
-            raise Exception(
+            raise Exception(
+                f"{main_message}: {request_response.json().get('message') or request_response.json().get('error')}"
+            )
         except json.JSONDecodeError:
             raise Exception(f"{main_message}: {request_response}")
     else:
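raise_on_bad_response now prefers the response's JSON "message" field and falls back to "error". A small sketch of that extraction logic against a hypothetical response object:

class FakeResponse:
    """Hypothetical stand-in for an httpx/requests response."""

    def __init__(self, body: dict):
        self._body = body

    def json(self) -> dict:
        return self._body


def extract_error_message(response) -> str:
    # Mirrors the f-string above: "message" wins, "error" is the fallback.
    body = response.json()
    return body.get("message") or body.get("error")


assert extract_error_message(FakeResponse({"message": "invalid api key"})) == "invalid api key"
assert extract_error_message(FakeResponse({"error": "internal error"})) == "internal error"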
@@ -953,7 +948,11 @@ async def aget_prompt_template(
             headers={"X-API-KEY": api_key},
             json=json_body,
         )
-        response.raise_for_status()
+        if response.status_code != 200:
+            raise_on_bad_response(
+                response,
+                "PromptLayer had the following error while getting your prompt template",
+            )
         warning = response.json().get("warning", None)
         if warning:
             warn_on_bad_response(
@@ -961,10 +960,6 @@ async def aget_prompt_template(
             "WARNING: While getting your prompt template",
         )
         return response.json()
-    except httpx.HTTPStatusError as e:
-        raise Exception(
-            f"PromptLayer had the following error while getting your prompt template: {e.response.text}"
-        ) from e
     except httpx.RequestError as e:
         raise Exception(
             f"PromptLayer had the following error while getting your prompt template: {str(e)}"
@@ -1015,12 +1010,12 @@ async def apublish_prompt_template(
             raise Exception(
                 f"PromptLayer had the following error while publishing your prompt template: {response.text}"
             )
-        response.raise_for_status()
+        if response.status_code != 201:
+            raise_on_bad_response(
+                response,
+                "PromptLayer had the following error while publishing your prompt template",
+            )
         return response.json()
-    except httpx.HTTPStatusError as e:
-        raise Exception(
-            f"PromptLayer had the following error while publishing your prompt template: {e.response.text}"
-        ) from e
     except httpx.RequestError as e:
         raise Exception(
             f"PromptLayer had the following error while publishing your prompt template: {str(e)}"
@@ -1058,13 +1053,13 @@ async def aget_all_prompt_templates(
             headers={"X-API-KEY": api_key},
             params={"page": page, "per_page": per_page},
         )
-        response.raise_for_status()
+        if response.status_code != 200:
+            raise_on_bad_response(
+                response,
+                "PromptLayer had the following error while getting all your prompt templates",
+            )
         items = response.json().get("items", [])
         return items
-    except httpx.HTTPStatusError as e:
-        raise Exception(
-            f"PromptLayer had the following error while getting all your prompt templates: {e.response.text}"
-        ) from e
     except httpx.RequestError as e:
         raise Exception(
             f"PromptLayer had the following error while getting all your prompt templates: {str(e)}"
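The three template helpers above all follow the same migration: response.raise_for_status(), which raises httpx.HTTPStatusError, is replaced by an explicit status-code check routed through raise_on_bad_response, so every failure surfaces as the same Exception type. A schematic of the two styles; the URL and expected status are placeholders:

import httpx


async def old_style(client: httpx.AsyncClient, url: str) -> dict:
    response = await client.get(url)
    response.raise_for_status()  # raises httpx.HTTPStatusError on 4xx/5xx
    return response.json()


async def new_style(client: httpx.AsyncClient, url: str, expected: int = 200) -> dict:
    response = await client.get(url)
    if response.status_code != expected:
        # The library routes this through raise_on_bad_response(...) instead.
        raise Exception(f"request failed with status {response.status_code}")
    return response.json()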
@@ -1264,7 +1259,7 @@ def openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
 def azure_openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
     from openai import AzureOpenAI
 
-    client = AzureOpenAI(
+    client = AzureOpenAI(azure_endpoint=kwargs.pop("base_url", None))
     request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[
         prompt_blueprint["prompt_template"]["type"]
     ]
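The Azure client now receives its endpoint from a caller-supplied base_url keyword. A hedged sketch of the pop-and-forward pattern; the endpoint value is a placeholder, and AzureOpenAI is assumed to read the API key and version from its usual environment variables:

from openai import AzureOpenAI

# Placeholder endpoint; api key and api version are assumed to come from the
# usual AZURE_OPENAI_API_KEY / OPENAI_API_VERSION environment variables.
kwargs = {"base_url": "https://example-resource.openai.azure.com", "temperature": 0.7}

client = AzureOpenAI(azure_endpoint=kwargs.pop("base_url", None))
# After the pop, the remaining kwargs no longer include base_url, so they can
# be forwarded to the completion call, much as azure_openai_request forwards
# its remaining **kwargs.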
{promptlayer-1.0.27.dist-info → promptlayer-1.0.29.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: promptlayer
-Version: 1.0.27
+Version: 1.0.29
 Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
 License: Apache-2.0
 Author: Magniv
@@ -16,6 +16,7 @@ Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: ably (>=2.0.6,<3.0.0)
 Requires-Dist: aiohttp (>=3.10.10,<4.0.0)
 Requires-Dist: httpx (>=0.27.2,<0.28.0)
+Requires-Dist: nest-asyncio (>=1.6.0,<2.0.0)
 Requires-Dist: opentelemetry-api (>=1.26.0,<2.0.0)
 Requires-Dist: opentelemetry-sdk (>=1.26.0,<2.0.0)
 Requires-Dist: requests (>=2.31.0,<3.0.0)
{promptlayer-1.0.27.dist-info → promptlayer-1.0.29.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
-promptlayer/__init__.py,sha256=
+promptlayer/__init__.py,sha256=v0SGSQb6xyDYZEA6IFcOSWw9FK_WUCe8OaEc1ZJ1N9M,140
 promptlayer/groups/__init__.py,sha256=xhOAolLUBkr76ZHvJr29OwjCIk1V9qKQXjZCuyTJUIY,429
 promptlayer/groups/groups.py,sha256=YPROicy-TzpkrpA8vOpZS2lwvJ6VRtlbQ1S2oT1N0vM,338
-promptlayer/promptlayer.py,sha256=
+promptlayer/promptlayer.py,sha256=3M6kBCJPAejUaWE-NhynOWe1Ml6VPXxCVh3OlME4WUA,20879
 promptlayer/promptlayer_base.py,sha256=sev-EZehRXJSZSmJtMkqmAUK1345pqbDY_lNjPP5MYA,7158
 promptlayer/span_exporter.py,sha256=zIJNsb3Fe6yb5wKLDmkoPF2wqFjk1p39E0jWHD2plzI,2658
 promptlayer/templates.py,sha256=bdX8ZxydWwF9QMF1UBD-qoYqYRPrUSTAt88r2D8ws7c,1193
@@ -10,8 +10,8 @@ promptlayer/track/track.py,sha256=UdkCxhWUvhvPdhsoHj4qmeiRq6xLcWmeIdYXrgZph04,32
 promptlayer/types/__init__.py,sha256=xJcvQuOk91ZBBePb40-1FDNDKYrZoH5lPE2q6_UhprM,111
 promptlayer/types/prompt_template.py,sha256=TUXLXvuvew0EBLfTMBa2LhFeQoF7R-tcFKg7_UUtHMQ,4433
 promptlayer/types/request_log.py,sha256=xU6bcxQar6GaBOJlgZTavXUV3FjE8sF_nSjPu4Ya_00,174
-promptlayer/utils.py,sha256
-promptlayer-1.0.
-promptlayer-1.0.
-promptlayer-1.0.
-promptlayer-1.0.
+promptlayer/utils.py,sha256=s_7XMGRjuqTJjPDArixBahsGVlO7xcerxgcVijd12BQ,44690
+promptlayer-1.0.29.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+promptlayer-1.0.29.dist-info/METADATA,sha256=Sh2YeNNgTxoPUG1jVh6z4M4vCV6zrSssU-PERG1XnSc,4824
+promptlayer-1.0.29.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+promptlayer-1.0.29.dist-info/RECORD,,
{promptlayer-1.0.27.dist-info → promptlayer-1.0.29.dist-info}/LICENSE
File without changes
{promptlayer-1.0.27.dist-info → promptlayer-1.0.29.dist-info}/WHEEL
File without changes