promptlayer 1.0.46__tar.gz → 1.0.48__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of promptlayer might be problematic.
- {promptlayer-1.0.46 → promptlayer-1.0.48}/PKG-INFO +1 -2
- {promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/__init__.py +1 -1
- {promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/promptlayer.py +6 -14
- {promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/utils.py +74 -53
- {promptlayer-1.0.46 → promptlayer-1.0.48}/pyproject.toml +5 -2
- {promptlayer-1.0.46 → promptlayer-1.0.48}/LICENSE +0 -0
- {promptlayer-1.0.46 → promptlayer-1.0.48}/README.md +0 -0
- {promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/groups/__init__.py +0 -0
- {promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/groups/groups.py +0 -0
- {promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/promptlayer_base.py +0 -0
- {promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/promptlayer_mixins.py +0 -0
- {promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/span_exporter.py +0 -0
- {promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/templates.py +0 -0
- {promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/track/__init__.py +0 -0
- {promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/track/track.py +0 -0
- {promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/types/__init__.py +0 -0
- {promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/types/prompt_template.py +0 -0
- {promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/types/request_log.py +0 -0
{promptlayer-1.0.46 → promptlayer-1.0.48}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: promptlayer
-Version: 1.0.46
+Version: 1.0.48
 Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
 License: Apache-2.0
 Author: Magniv
@@ -15,7 +15,6 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: ably (>=2.0.11,<3.0.0)
 Requires-Dist: aiohttp (>=3.10.10,<4.0.0)
-Requires-Dist: anthropic (==0.49.0)
 Requires-Dist: httpx (>=0.28.1,<0.29.0)
 Requires-Dist: nest-asyncio (>=1.6.0,<2.0.0)
 Requires-Dist: opentelemetry-api (>=1.26.0,<2.0.0)
{promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/promptlayer.py

@@ -71,24 +71,17 @@ class PromptLayer(PromptLayerMixin):
         if name == "openai":
             import openai as openai_module
 
-            openai = PromptLayerBase(
-                openai_module,
-                function_name="openai",
-                api_key=self.api_key,
-                tracer=self.tracer,
-            )
-            return openai
+            return PromptLayerBase(openai_module, function_name="openai", api_key=self.api_key, tracer=self.tracer)
         elif name == "anthropic":
             import anthropic as anthropic_module
 
-            anthropic = PromptLayerBase(
+            return PromptLayerBase(
                 anthropic_module,
                 function_name="anthropic",
                 provider_type="anthropic",
                 api_key=self.api_key,
                 tracer=self.tracer,
             )
-            return anthropic
         else:
             raise AttributeError(f"module {__name__} has no attribute {name}")
 
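Both branches now return the `PromptLayerBase` wrapper directly instead of binding it to a local name first; behavior is unchanged. The surrounding method is a lazy `__getattr__` dispatcher: the provider SDK is imported only when the attribute is first accessed, then handed back wrapped. A toy sketch of that dispatch pattern (hypothetical class, not the library's code):

# Toy sketch of lazy __getattr__ dispatch (hypothetical, for illustration):
# the heavy import happens only when the attribute is first touched.
class LazySDKs:
    def __getattr__(self, name):
        if name == "json_sdk":
            import json as json_module  # stands in for a provider SDK import
            return json_module          # promptlayer wraps this in a proxy instead
        raise AttributeError(f"module {__name__} has no attribute {name}")

sdks = LazySDKs()
print(sdks.json_sdk.dumps({"ok": True}))  # {"ok": true}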
@@ -163,7 +156,6 @@ class PromptLayer(PromptLayerMixin):
             ),
             llm_request_params["stream_function"],
         )
-
         request_log = self._track_request_log(
             llm_request_params,
             tags,
@@ -171,7 +163,7 @@ class PromptLayer(PromptLayerMixin):
             group_id,
             pl_run_span_id,
             metadata=metadata,
-            request_response=response.model_dump(),
+            request_response=response.model_dump(mode="json"),
         )
 
         return {
@@ -290,8 +282,8 @@ class PromptLayer(PromptLayerMixin):
                 raise Exception(json.dumps(results, indent=4))
 
             return results
-        except Exception as e:
-            raise Exception(f"Error running workflow: {str(e)}")
+        except Exception as ex:
+            raise Exception(f"Error running workflow: {str(ex)}") from ex
 
     def log_request(
         self,
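Besides renaming the exception variable, the re-raise now uses `from ex`, which chains the original exception as `__cause__` so the root-cause traceback is preserved. A quick illustration (not promptlayer code):

# Illustration: `raise ... from ex` keeps the original error on __cause__.
try:
    try:
        1 / 0
    except Exception as ex:
        raise Exception("Error running workflow") from ex
except Exception as outer:
    print(type(outer.__cause__).__name__)  # ZeroDivisionError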
@@ -585,7 +577,7 @@ class AsyncPromptLayer(PromptLayerMixin):
             group_id,
             pl_run_span_id,
             metadata=metadata,
-            request_response=response.model_dump(),
+            request_response=response.model_dump(mode="json"),
         )
 
         return {
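Both the sync and async paths now call `model_dump(mode="json")`. In Pydantic v2 this coerces field values to JSON-compatible types (datetimes to ISO strings, enums to their values, and so on), so the request log can be serialized without a custom encoder. A minimal illustration, assuming Pydantic v2 (not promptlayer code):

# Minimal illustration of mode="json" vs. the default python mode.
from datetime import datetime
from pydantic import BaseModel

class RequestLog(BaseModel):
    created: datetime

log = RequestLog(created=datetime(2025, 1, 1))
print(log.model_dump())             # {'created': datetime.datetime(2025, 1, 1, 0, 0)}
print(log.model_dump(mode="json"))  # {'created': '2025-01-01T00:00:00'}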
{promptlayer-1.0.46 → promptlayer-1.0.48}/promptlayer/utils.py

@@ -26,6 +26,61 @@ from promptlayer.types.prompt_template import (
 )
 
 URL_API_PROMPTLAYER = os.environ.setdefault("URL_API_PROMPTLAYER", "https://api.promptlayer.com")
+WORKFLOWS_RUN_URL = URL_API_PROMPTLAYER + "/workflows/{}/run"
+WS_TOKEN_REQUEST_LIBRARY_URL = URL_API_PROMPTLAYER + "/ws-token-request-library"
+
+SET_WORKFLOW_COMPLETE_MESSAGE = "SET_WORKFLOW_COMPLETE"
+
+
+class FinalOutputCode(Enum):
+    OK = "OK"
+    EXCEEDS_SIZE_LIMIT = "EXCEEDS_SIZE_LIMIT"
+
+
+async def _get_final_output(execution_id: int, return_all_outputs: bool, *, headers: Dict[str, str]) -> Dict[str, Any]:
+    async with httpx.AsyncClient() as client:
+        response = await client.get(
+            f"{URL_API_PROMPTLAYER}/workflow-version-execution-results",
+            headers=headers,
+            params={"workflow_version_execution_id": execution_id, "return_all_outputs": return_all_outputs},
+        )
+        response.raise_for_status()
+        return response.json()
+
+
+def _make_message_listener(results_future, execution_id, return_all_outputs, headers):
+    async def message_listener(message: Message):
+        if message.name != SET_WORKFLOW_COMPLETE_MESSAGE:  # TODO(dmu) LOW: Do we really need this check?
+            return
+
+        message_data = json.loads(message.data)
+        result_code = message_data.get("result_code")
+        if result_code in (FinalOutputCode.OK.value, None):
+            results = message_data["final_output"]
+        elif result_code == FinalOutputCode.EXCEEDS_SIZE_LIMIT.value:
+            results = await _get_final_output(execution_id, return_all_outputs, headers=headers)
+        else:
+            raise NotImplementedError(f"Unsupported final output code: {result_code}")
+
+        results_future.set_result(results)
+
+    return message_listener
+
+
+async def _wait_for_workflow_completion(*, token, channel_name, execution_id, return_all_outputs, headers, timeout):
+    results = asyncio.Future()
+    message_listener = _make_message_listener(results, execution_id, return_all_outputs, headers)
+
+    client = AblyRealtime(token=token)
+    channel = client.channels.get(channel_name)
+    await channel.subscribe(SET_WORKFLOW_COMPLETE_MESSAGE, message_listener)
+    try:
+        return await asyncio.wait_for(results, timeout)
+    except asyncio.TimeoutError as ex:
+        raise Exception("Workflow execution did not complete properly") from ex
+    finally:
+        channel.unsubscribe(SET_WORKFLOW_COMPLETE_MESSAGE, message_listener)
+        await client.close()
 
 
 async def arun_workflow_request(
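The new helpers replace the old `asyncio.Event` plus `nonlocal` bookkeeping with an `asyncio.Future` that the Ably message listener resolves and the caller awaits under a timeout; the `finally` block guarantees the channel is unsubscribed and the client closed on every path. A minimal sketch of that future-resolving pattern with a stand-in callback (illustrative only, not promptlayer code):

# Sketch: a callback resolves a Future; the caller awaits it with a timeout.
import asyncio

async def main():
    loop = asyncio.get_running_loop()
    results: asyncio.Future = loop.create_future()

    def on_complete(payload):  # stands in for the Ably message listener
        if not results.done():
            results.set_result(payload)

    loop.call_later(0.1, on_complete, {"final_output": "done"})
    print(await asyncio.wait_for(results, timeout=5))  # {'final_output': 'done'}

asyncio.run(main())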
@@ -46,13 +101,10 @@ async def arun_workflow_request(
         "workflow_version_number": workflow_version_number,
         "return_all_outputs": return_all_outputs,
     }
-
-    url = f"{URL_API_PROMPTLAYER}/workflows/{workflow_name}/run"
     headers = {"X-API-KEY": api_key}
-
     try:
         async with httpx.AsyncClient() as client:
-            response = await client.post(url, json=payload, headers=headers)
+            response = await client.post(WORKFLOWS_RUN_URL.format(workflow_name), json=payload, headers=headers)
             if response.status_code != 201:
                 raise_on_bad_response(
                     response,
@@ -60,28 +112,22 @@ async def arun_workflow_request(
                 )
 
         result = response.json()
-        warning = result.get("warning")
-        if warning:
+        if warning := result.get("warning"):
             print(f"WARNING: {warning}")
 
-    except Exception as e:
-        error_message = f"Failed to run workflow: {str(e)}"
+    except Exception as ex:
+        error_message = f"Failed to run workflow: {str(ex)}"
         print(error_message)
         raise Exception(error_message)
 
-    execution_id = result.get("workflow_version_execution_id")
-    if not execution_id:
+    if not (execution_id := result.get("workflow_version_execution_id")):
         raise Exception("No execution ID returned from workflow run")
 
     channel_name = f"workflow_updates:{execution_id}"
-
-    # Get WebSocket token
     try:
         async with httpx.AsyncClient() as client:
             ws_response = await client.post(
-                f"{URL_API_PROMPTLAYER}/ws-token-request-library",
-                headers=headers,
-                params={"capability": channel_name},
+                WS_TOKEN_REQUEST_LIBRARY_URL, headers=headers, params={"capability": channel_name}
             )
             if ws_response.status_code != 201:
                 raise_on_bad_response(
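The `warning` and `execution_id` checks now use assignment expressions (`:=`), binding the looked-up value and testing it in one step. For example (illustrative):

# Illustration of the walrus pattern used above.
result = {"warning": "approaching rate limit"}
if warning := result.get("warning"):
    print(f"WARNING: {warning}")  # WARNING: approaching rate limit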
@@ -89,44 +135,19 @@ async def arun_workflow_request(
                     "PromptLayer had the following error while getting WebSocket token",
                 )
             token_details = ws_response.json()["token_details"]
-    except Exception as e:
-        error_message = f"Failed to get WebSocket token: {e}"
+    except Exception as ex:
+        error_message = f"Failed to get WebSocket token: {ex}"
         print(error_message)
-        raise Exception(error_message)
-
-    ably_client = AblyRealtime(token=token_details["token"])
-    channel = ably_client.channels.get(channel_name)
-
-    results = None
-    message_received_event = asyncio.Event()
-
-    async def message_listener(message: Message):
-        nonlocal results
-
-        if message.name == "SET_WORKFLOW_COMPLETE":
-            message_data = json.loads(message.data)
-            results = message_data["final_output"]
-            message_received_event.set()
-
-    # Subscribe to the channel
-    await channel.subscribe("SET_WORKFLOW_COMPLETE", message_listener)
-
-    # Wait for the message or timeout
-    try:
-        await asyncio.wait_for(message_received_event.wait(), timeout)
-    except asyncio.TimeoutError:
-        channel.unsubscribe("SET_WORKFLOW_COMPLETE", message_listener)
-        await ably_client.close()
-        raise Exception("Workflow execution did not complete properly")
-
-    # Unsubscribe from the channel and close the client
-    channel.unsubscribe("SET_WORKFLOW_COMPLETE", message_listener)
-    await ably_client.close()
-
-    return results
+        raise Exception(error_message) from ex
+
+    return await _wait_for_workflow_completion(
+        token=token_details["token"],
+        channel_name=channel_name,
+        execution_id=execution_id,
+        return_all_outputs=return_all_outputs,
+        headers=headers,
+        timeout=timeout,
+    )
 
 
 def promptlayer_api_handler(
@@ -1813,7 +1834,7 @@ async def agoogle_request(request: GetPromptTemplateResponse, **kwargs):
 async def amap_google_stream_response(generator: AsyncIterable[Any]):
     from google.genai.chats import GenerateContentResponse
 
-
+    GenerateContentResponse()
     content = ""
     async for result in generator:
         content = f"{content}{result.candidates[0].content.parts[0].text}"
{promptlayer-1.0.46 → promptlayer-1.0.48}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "promptlayer"
-version = "1.0.46"
+version = "1.0.48"
 description = "PromptLayer is a platform for prompt engineering and tracks your LLM requests."
 authors = ["Magniv <hello@magniv.io>"]
 license = "Apache-2.0"
@@ -15,7 +15,6 @@ ably = "^2.0.11"
 aiohttp = "^3.10.10"
 httpx = "^0.28.1"
 nest-asyncio = "^1.6.0"
-anthropic = "0.49.0"
 
 [tool.poetry.group.dev.dependencies]
 behave = "^1.2.6"
@@ -23,6 +22,10 @@ pytest = "^8.2.0"
 pytest-asyncio = "^0.23.6"
 openai = "^1.60.1"
 google-genai = "^1.5.0"
+anthropic = "0.49.0"
+# TODO(dmu) MEDIUM: Upgrade to vcrpy >= 7 once it supports urllib3 >= 2.2.2
+vcrpy = "<7.0.0"
+pytest-network = "^0.0.1"
 
 [build-system]
 requires = ["poetry-core"]