promptlayer 1.0.23__tar.gz → 1.0.25__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of promptlayer might be problematic.
- {promptlayer-1.0.23 → promptlayer-1.0.25}/PKG-INFO +2 -1
- {promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/__init__.py +1 -1
- {promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/promptlayer.py +8 -4
- {promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/utils.py +82 -14
- {promptlayer-1.0.23 → promptlayer-1.0.25}/pyproject.toml +2 -1
- {promptlayer-1.0.23 → promptlayer-1.0.25}/LICENSE +0 -0
- {promptlayer-1.0.23 → promptlayer-1.0.25}/README.md +0 -0
- {promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/groups/__init__.py +0 -0
- {promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/groups/groups.py +0 -0
- {promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/promptlayer_base.py +0 -0
- {promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/span_exporter.py +0 -0
- {promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/templates.py +0 -0
- {promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/track/__init__.py +0 -0
- {promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/track/track.py +0 -0
- {promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/types/__init__.py +0 -0
- {promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/types/prompt_template.py +0 -0
- {promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/types/request_log.py +0 -0
{promptlayer-1.0.23 → promptlayer-1.0.25}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: promptlayer
-Version: 1.0.23
+Version: 1.0.25
 Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
 License: Apache-2.0
 Author: Magniv
@@ -13,6 +13,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
+Requires-Dist: ably (>=2.0.6,<3.0.0)
 Requires-Dist: opentelemetry-api (>=1.26.0,<2.0.0)
 Requires-Dist: opentelemetry-sdk (>=1.26.0,<2.0.0)
 Requires-Dist: requests (>=2.31.0,<3.0.0)
```
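A quick way to confirm the new `ably` dependency against an installed copy is via `importlib.metadata`. This is a sketch, not part of the diff; it assumes promptlayer 1.0.25 is installed, and the exact requirement string formatting can vary by metadata version.

```python
from importlib.metadata import requires

# requires() returns the package's declared dependencies as strings.
ably_reqs = [r for r in requires("promptlayer") if r.startswith("ably")]
print(ably_reqs)  # e.g. ['ably (>=2.0.6,<3.0.0)']
```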
{promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/promptlayer.py

```diff
@@ -24,7 +24,7 @@ from promptlayer.utils import (
     openai_request,
     openai_stream_chat,
     openai_stream_completion,
-    run_workflow_request,
+    run_workflow_async,
     stream_response,
     track_request,
     util_log_request,
@@ -377,16 +377,20 @@ class PromptLayer:
         input_variables: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, str]] = None,
         workflow_label_name: Optional[str] = None,
-        workflow_version: Optional[int] = None,
+        workflow_version: Optional[
+            int
+        ] = None,  # This is the version number, not the version ID
+        return_all_outputs: Optional[bool] = False,
     ) -> Dict[str, Any]:
         try:
-            result = run_workflow_request(
+            result = run_workflow_async(
                 workflow_name=workflow_name,
                 input_variables=input_variables or {},
                 metadata=metadata,
                 workflow_label_name=workflow_label_name,
-                workflow_version_number=workflow_version,
+                workflow_version_number=workflow_version,
                 api_key=self.api_key,
+                return_all_outputs=return_all_outputs,
             )
             return result
         except Exception as e:
```
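For context, a minimal sketch of calling the updated method (`run_workflow` in the SDK). The API key and workflow name below are placeholders; `return_all_outputs` is new in this release, and `workflow_version` takes a version number rather than a version ID, per the comment added in the diff.

```python
from promptlayer import PromptLayer

promptlayer_client = PromptLayer(api_key="pl_...")  # placeholder key

result = promptlayer_client.run_workflow(
    workflow_name="my-workflow",           # hypothetical workflow name
    input_variables={"topic": "testing"},
    workflow_version=2,                    # version number, not version ID
    return_all_outputs=True,               # new in this release
)
print(result)
```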
{promptlayer-1.0.23 → promptlayer-1.0.25}/promptlayer/utils.py

```diff
@@ -10,7 +10,9 @@ from copy import deepcopy
 from enum import Enum
 from typing import Any, Callable, Dict, Generator, List, Optional, Union
 
+import aiohttp
 import requests
+from ably import AblyRealtime
 from opentelemetry import context, trace
 
 from promptlayer.types import RequestLog
@@ -27,7 +29,7 @@ URL_API_PROMPTLAYER = os.environ.setdefault(
 )
 
 
-def run_workflow_request(
+def run_workflow_async(
     *,
     workflow_name: str,
     input_variables: Dict[str, Any],
@@ -35,37 +37,103 @@ def run_workflow_request(
     workflow_label_name: Optional[str] = None,
     workflow_version_number: Optional[int] = None,
     api_key: str,
+    return_all_outputs: Optional[bool] = False,
+    timeout: Optional[int] = 120,
+) -> Dict[str, Any]:
+    return asyncio.run(
+        run_workflow_request(
+            workflow_name=workflow_name,
+            input_variables=input_variables,
+            metadata=metadata,
+            workflow_label_name=workflow_label_name,
+            workflow_version_number=workflow_version_number,
+            api_key=api_key,
+            return_all_outputs=return_all_outputs,
+            timeout=timeout,
+        )
+    )
+
+
+async def run_workflow_request(
+    *,
+    workflow_name: str,
+    input_variables: Dict[str, Any],
+    metadata: Optional[Dict[str, str]] = None,
+    workflow_label_name: Optional[str] = None,
+    workflow_version_number: Optional[int] = None,
+    api_key: str,
+    return_all_outputs: Optional[bool] = None,
+    timeout: Optional[int] = 120,
 ) -> Dict[str, Any]:
     payload = {
         "input_variables": input_variables,
         "metadata": metadata,
         "workflow_label_name": workflow_label_name,
         "workflow_version_number": workflow_version_number,
+        "return_all_outputs": return_all_outputs,
     }
 
     url = f"{URL_API_PROMPTLAYER}/workflows/{workflow_name}/run"
     headers = {"X-API-KEY": api_key}
 
     try:
-        response = requests.post(url, json=payload, headers=headers)
-    except requests.exceptions.RequestException as e:
+        async with aiohttp.ClientSession() as session:
+            async with session.post(url, json=payload, headers=headers) as response:
+                if response.status != 201:
+                    error_message = f"Failed to run workflow: {response.status} {await response.text()}"
+                    print(error_message)
+                    raise Exception(error_message)
+                result = await response.json()
+    except Exception as e:
         error_message = f"Failed to run workflow: {e}"
-        print(error_message)
+        print(error_message)
         raise Exception(error_message)
 
-    if response.status_code != 201:
-        try:
-            error_details = response.json().get("error", "Unknown error")
-        except ValueError:
-            error_details = response.text or "Unknown error"
+    execution_id = result.get("workflow_version_execution_id")
+    if not execution_id:
+        raise Exception("No execution ID returned from workflow run")
 
-        error_message = f"Failed to run workflow: {error_details}"
-        print(error_message)
-        raise Exception(error_message)
+    channel_name = f"workflow_updates:{execution_id}"
+
+    ws_response = requests.post(
+        f"{URL_API_PROMPTLAYER}/ws-token-request-library",
+        headers=headers,
+        params={"capability": channel_name},
+    )
+    token_details = ws_response.json()["token_details"]
+
+    # Initialize Ably client
+    ably_client = AblyRealtime(token=token_details["token"])
+
+    # Subscribe to the channel named after the execution ID
+    channel = ably_client.channels.get(channel_name)
+
+    final_output = {}
+    message_received_event = asyncio.Event()
+
+    async def message_listener(message):
+        if message.name == "set_workflow_node_output":
+            data = json.loads(message.data)
+            if data.get("status") == "workflow_complete":
+                final_output.update(data.get("final_output", {}))
+                message_received_event.set()
+
+    # Subscribe to the channel
+    await channel.subscribe("set_workflow_node_output", message_listener)
+
+    # Wait for the message or timeout
+    try:
+        await asyncio.wait_for(message_received_event.wait(), timeout)
+    except asyncio.TimeoutError:
+        channel.unsubscribe("set_workflow_node_output", message_listener)
+        await ably_client.close()
+        raise Exception("Workflow execution did not complete properly")
 
-    result = response.json()
+    # Unsubscribe from the channel
+    channel.unsubscribe("set_workflow_node_output", message_listener)
+    await ably_client.close()
 
-    return result
+    return final_output
 
 
 def promptlayer_api_handler(
```
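The new `run_workflow_request` no longer treats the HTTP response as the final result: it only extracts an execution ID, then waits for a realtime "workflow_complete" message. The core of that wait is an `asyncio.Event` set from a message listener and awaited with a timeout. Below is a self-contained sketch of just that pattern, with a simulated publisher standing in for the Ably channel; nothing here calls PromptLayer or Ably.

```python
import asyncio
import json


async def wait_for_completion(timeout: float = 5.0) -> dict:
    final_output: dict = {}
    done = asyncio.Event()

    async def listener(raw_message: str) -> None:
        # Mirrors message_listener: parse, check status, record output, signal.
        data = json.loads(raw_message)
        if data.get("status") == "workflow_complete":
            final_output.update(data.get("final_output", {}))
            done.set()

    async def fake_publisher() -> None:
        # Stands in for the Ably channel delivering the completion message.
        await asyncio.sleep(0.1)
        await listener(
            json.dumps({"status": "workflow_complete", "final_output": {"answer": 42}})
        )

    publisher_task = asyncio.create_task(fake_publisher())
    try:
        # Block until the listener signals, or give up after `timeout` seconds.
        await asyncio.wait_for(done.wait(), timeout)
    except asyncio.TimeoutError:
        publisher_task.cancel()
        raise Exception("Workflow execution did not complete properly")
    return final_output


print(asyncio.run(wait_for_completion()))  # {'answer': 42}
```

The `asyncio.run(...)` wrapper in `run_workflow_async` follows the same sync-over-async shape as the last line here, which is why the SDK can keep `run_workflow` synchronous while the transport becomes event-driven.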
{promptlayer-1.0.23 → promptlayer-1.0.25}/pyproject.toml

```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "promptlayer"
-version = "1.0.23"
+version = "1.0.25"
 description = "PromptLayer is a platform for prompt engineering and tracks your LLM requests."
 authors = ["Magniv <hello@magniv.io>"]
 license = "Apache-2.0"
@@ -11,6 +11,7 @@ python = ">=3.8.1,<4.0"
 requests = "^2.31.0"
 opentelemetry-api = "^1.26.0"
 opentelemetry-sdk = "^1.26.0"
+ably = "^2.0.6"
 
 [tool.poetry.group.dev.dependencies]
 langchain = "^0.0.260"
```
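Poetry's caret constraint `ably = "^2.0.6"` is what expands to the `Requires-Dist: ably (>=2.0.6,<3.0.0)` line in PKG-INFO above. A quick sketch using the `packaging` library (an assumption of this example, not a dependency of the diff) to sanity-check the range:

```python
from packaging.specifiers import SpecifierSet

# The range that "^2.0.6" expands to in the built metadata.
ably_range = SpecifierSet(">=2.0.6,<3.0.0")
print("2.0.6" in ably_range)  # True
print("2.9.9" in ably_range)  # True
print("3.0.0" in ably_range)  # False
```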