promptlayer 1.0.47__tar.gz → 1.0.49__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of promptlayer might be problematic; consult the package registry's advisory page for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: promptlayer
3
- Version: 1.0.47
3
+ Version: 1.0.49
4
4
  Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
5
5
  License: Apache-2.0
6
6
  Author: Magniv
@@ -1,4 +1,4 @@
1
1
  from .promptlayer import AsyncPromptLayer, PromptLayer
2
2
 
3
- __version__ = "1.0.47"
3
+ __version__ = "1.0.49"
4
4
  __all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]
@@ -1,5 +1,6 @@
1
1
  import asyncio
2
2
  import json
3
+ import logging
3
4
  import os
4
5
  from typing import Any, Dict, List, Literal, Optional, Union
5
6
 
@@ -12,6 +13,8 @@ from promptlayer.templates import AsyncTemplateManager, TemplateManager
12
13
  from promptlayer.track import AsyncTrackManager, TrackManager
13
14
  from promptlayer.types.prompt_template import PromptTemplate
14
15
  from promptlayer.utils import (
16
+ RERAISE_ORIGINAL_EXCEPTION,
17
+ _get_workflow_workflow_id_or_name,
15
18
  arun_workflow_request,
16
19
  astream_response,
17
20
  atrack_request,
@@ -21,6 +24,8 @@ from promptlayer.utils import (
21
24
  util_log_request,
22
25
  )
23
26
 
27
+ logger = logging.getLogger(__name__)
28
+
24
29
 
25
30
  def is_workflow_results_dict(obj: Any) -> bool:
26
31
  if not isinstance(obj, dict):
@@ -156,7 +161,6 @@ class PromptLayer(PromptLayerMixin):
156
161
  ),
157
162
  llm_request_params["stream_function"],
158
163
  )
159
-
160
164
  request_log = self._track_request_log(
161
165
  llm_request_params,
162
166
  tags,
@@ -164,7 +168,7 @@ class PromptLayer(PromptLayerMixin):
164
168
  group_id,
165
169
  pl_run_span_id,
166
170
  metadata=metadata,
167
- request_response=response.model_dump(),
171
+ request_response=response.model_dump(mode="json"),
168
172
  )
169
173
 
170
174
  return {
@@ -232,59 +236,53 @@ class PromptLayer(PromptLayerMixin):
232
236
 
233
237
  def run_workflow(
234
238
  self,
235
- workflow_name: str,
239
+ workflow_id_or_name: Optional[Union[int, str]] = None,
236
240
  input_variables: Optional[Dict[str, Any]] = None,
237
241
  metadata: Optional[Dict[str, str]] = None,
238
242
  workflow_label_name: Optional[str] = None,
239
243
  workflow_version: Optional[int] = None,
240
244
  return_all_outputs: Optional[bool] = False,
245
+ # `workflow_name` deprecated, kept for backward compatibility only.
246
+ # Allows `workflow_name` to be passed both as keyword and positional argument
247
+ # (virtually identical to `workflow_id_or_name`)
248
+ workflow_name: Optional[str] = None,
241
249
  ) -> Union[Dict[str, Any], Any]:
242
250
  try:
243
251
  try:
244
- # Check if we're inside a running event loop
245
- loop = asyncio.get_running_loop()
252
+ loop = asyncio.get_running_loop() # Check if we're inside a running event loop
246
253
  except RuntimeError:
247
254
  loop = None
248
255
 
249
256
  if loop and loop.is_running():
250
257
  nest_asyncio.apply()
251
- results = asyncio.run(
252
- arun_workflow_request(
253
- workflow_name=workflow_name,
254
- input_variables=input_variables or {},
255
- metadata=metadata,
256
- workflow_label_name=workflow_label_name,
257
- workflow_version_number=workflow_version,
258
- api_key=self.api_key,
259
- return_all_outputs=return_all_outputs,
260
- )
261
- )
262
- else:
263
- results = asyncio.run(
264
- arun_workflow_request(
265
- workflow_name=workflow_name,
266
- input_variables=input_variables or {},
267
- metadata=metadata,
268
- workflow_label_name=workflow_label_name,
269
- workflow_version_number=workflow_version,
270
- api_key=self.api_key,
271
- return_all_outputs=return_all_outputs,
272
- )
273
- )
274
258
 
275
- if not return_all_outputs:
276
- if is_workflow_results_dict(results):
277
- output_nodes = [node_data for node_data in results.values() if node_data.get("is_output_node")]
259
+ results = asyncio.run(
260
+ arun_workflow_request(
261
+ workflow_id_or_name=_get_workflow_workflow_id_or_name(workflow_id_or_name, workflow_name),
262
+ input_variables=input_variables or {},
263
+ metadata=metadata,
264
+ workflow_label_name=workflow_label_name,
265
+ workflow_version_number=workflow_version,
266
+ api_key=self.api_key,
267
+ return_all_outputs=return_all_outputs,
268
+ )
269
+ )
278
270
 
279
- if not output_nodes:
280
- raise Exception(json.dumps(results, indent=4))
271
+ if not return_all_outputs and is_workflow_results_dict(results):
272
+ output_nodes = [node_data for node_data in results.values() if node_data.get("is_output_node")]
273
+ if not output_nodes:
274
+ raise Exception("Output nodes not found: %S", json.dumps(results, indent=4))
281
275
 
282
- if not any(node.get("status") == "SUCCESS" for node in output_nodes):
283
- raise Exception(json.dumps(results, indent=4))
276
+ if not any(node.get("status") == "SUCCESS" for node in output_nodes):
277
+ raise Exception("None of the output nodes have succeeded", json.dumps(results, indent=4))
284
278
 
285
279
  return results
286
280
  except Exception as ex:
287
- raise Exception(f"Error running workflow: {str(ex)}") from ex
281
+ logger.exception("Error running workflow")
282
+ if RERAISE_ORIGINAL_EXCEPTION:
283
+ raise
284
+ else:
285
+ raise Exception(f"Error running workflow: {str(ex)}") from ex
288
286
 
289
287
  def log_request(
290
288
  self,
@@ -295,6 +293,8 @@ class PromptLayer(PromptLayerMixin):
295
293
  output: PromptTemplate,
296
294
  request_start_time: float,
297
295
  request_end_time: float,
296
+ # TODO(dmu) MEDIUM: Avoid using mutable defaults
297
+ # TODO(dmu) MEDIUM: Deprecate and remove this wrapper function?
298
298
  parameters: Dict[str, Any] = {},
299
299
  tags: List[str] = [],
300
300
  metadata: Dict[str, str] = {},
@@ -377,16 +377,20 @@ class AsyncPromptLayer(PromptLayerMixin):
377
377
 
378
378
  async def run_workflow(
379
379
  self,
380
- workflow_name: str,
380
+ workflow_id_or_name: Optional[Union[int, str]] = None,
381
381
  input_variables: Optional[Dict[str, Any]] = None,
382
382
  metadata: Optional[Dict[str, str]] = None,
383
383
  workflow_label_name: Optional[str] = None,
384
384
  workflow_version: Optional[int] = None, # This is the version number, not the version ID
385
385
  return_all_outputs: Optional[bool] = False,
386
+ # `workflow_name` deprecated, kept for backward compatibility only.
387
+ # Allows `workflow_name` to be passed both as keyword and positional argument
388
+ # (virtually identical to `workflow_id_or_name`)
389
+ workflow_name: Optional[str] = None,
386
390
  ) -> Dict[str, Any]:
387
391
  try:
388
- result = await arun_workflow_request(
389
- workflow_name=workflow_name,
392
+ return await arun_workflow_request(
393
+ workflow_id_or_name=_get_workflow_workflow_id_or_name(workflow_id_or_name, workflow_name),
390
394
  input_variables=input_variables or {},
391
395
  metadata=metadata,
392
396
  workflow_label_name=workflow_label_name,
@@ -394,9 +398,12 @@ class AsyncPromptLayer(PromptLayerMixin):
394
398
  api_key=self.api_key,
395
399
  return_all_outputs=return_all_outputs,
396
400
  )
397
- return result
398
- except Exception as e:
399
- raise Exception(f"Error running workflow: {str(e)}")
401
+ except Exception as ex:
402
+ logger.exception("Error running workflow")
403
+ if RERAISE_ORIGINAL_EXCEPTION:
404
+ raise
405
+ else:
406
+ raise Exception(f"Error running workflow: {str(ex)}")
400
407
 
401
408
  async def run(
402
409
  self,
@@ -578,7 +585,7 @@ class AsyncPromptLayer(PromptLayerMixin):
578
585
  group_id,
579
586
  pl_run_span_id,
580
587
  metadata=metadata,
581
- request_response=response.model_dump(),
588
+ request_response=response.model_dump(mode="json"),
582
589
  )
583
590
 
584
591
  return {
@@ -51,13 +51,8 @@ class PromptLayerSpanExporter(SpanExporter):
51
51
  try:
52
52
  response = requests.post(
53
53
  self.url,
54
- headers={
55
- "X-Api-Key": self.api_key,
56
- "Content-Type": "application/json",
57
- },
58
- json={
59
- "spans": request_data,
60
- },
54
+ headers={"X-Api-Key": self.api_key, "Content-Type": "application/json"},
55
+ json={"spans": request_data},
61
56
  )
62
57
  response.raise_for_status()
63
58
  return SpanExportResult.SUCCESS
@@ -3,12 +3,14 @@ import contextvars
3
3
  import datetime
4
4
  import functools
5
5
  import json
6
+ import logging
6
7
  import os
7
8
  import sys
8
9
  import types
9
10
  from copy import deepcopy
10
11
  from enum import Enum
11
12
  from typing import Any, AsyncGenerator, AsyncIterable, Callable, Dict, Generator, List, Optional, Union
13
+ from uuid import uuid4
12
14
 
13
15
  import httpx
14
16
  import requests
@@ -25,11 +27,20 @@ from promptlayer.types.prompt_template import (
25
27
  PublishPromptTemplateResponse,
26
28
  )
27
29
 
30
+ # Configuration
31
+ # TODO(dmu) MEDIUM: Use `PROMPTLAYER_` prefix instead of `_PROMPTLAYER` suffix
28
32
  URL_API_PROMPTLAYER = os.environ.setdefault("URL_API_PROMPTLAYER", "https://api.promptlayer.com")
29
- WORKFLOWS_RUN_URL = URL_API_PROMPTLAYER + "/workflows/{}/run"
30
- WS_TOKEN_REQUEST_LIBRARY_URL = URL_API_PROMPTLAYER + "/ws-token-request-library"
33
+ RERAISE_ORIGINAL_EXCEPTION = os.getenv("PROMPTLAYER_RE_RAISE_ORIGINAL_EXCEPTION", "False").lower() == "true"
34
+ RAISE_FOR_STATUS = os.getenv("PROMPTLAYER_RAISE_FOR_STATUS", "False").lower() == "true"
35
+ DEFAULT_HTTP_TIMEOUT = 5
31
36
 
37
+ WORKFLOW_RUN_URL_TEMPLATE = "{base_url}/workflows/{workflow_id}/run"
38
+ WORKFLOW_RUN_CHANNEL_NAME_TEMPLATE = "workflows:{workflow_id}:run:{channel_name_suffix}"
32
39
  SET_WORKFLOW_COMPLETE_MESSAGE = "SET_WORKFLOW_COMPLETE"
40
+ WS_TOKEN_REQUEST_LIBRARY_URL = URL_API_PROMPTLAYER + "/ws-token-request-library"
41
+
42
+
43
+ logger = logging.getLogger(__name__)
33
44
 
34
45
 
35
46
  class FinalOutputCode(Enum):
@@ -37,6 +48,25 @@ class FinalOutputCode(Enum):
37
48
  EXCEEDS_SIZE_LIMIT = "EXCEEDS_SIZE_LIMIT"
38
49
 
39
50
 
51
+ def _get_http_timeout():
52
+ try:
53
+ return float(os.getenv("PROMPTLAYER_HTTP_TIMEOUT", DEFAULT_HTTP_TIMEOUT))
54
+ except (ValueError, TypeError):
55
+ return DEFAULT_HTTP_TIMEOUT
56
+
57
+
58
+ def _make_httpx_client():
59
+ return httpx.AsyncClient(timeout=_get_http_timeout())
60
+
61
+
62
+ def _get_workflow_workflow_id_or_name(workflow_id_or_name, workflow_name):
63
+ # This is backward compatibility code
64
+ if (workflow_id_or_name := workflow_name if workflow_id_or_name is None else workflow_id_or_name) is None:
65
+ raise ValueError('Either "workflow_id_or_name" or "workflow_name" must be provided')
66
+
67
+ return workflow_id_or_name
68
+
69
+
40
70
  async def _get_final_output(execution_id: int, return_all_outputs: bool, *, headers: Dict[str, str]) -> Dict[str, Any]:
41
71
  async with httpx.AsyncClient() as client:
42
72
  response = await client.get(
@@ -48,14 +78,62 @@ async def _get_final_output(execution_id: int, return_all_outputs: bool, *, head
48
78
  return response.json()
49
79
 
50
80
 
51
- def _make_message_listener(results_future, execution_id, return_all_outputs, headers):
81
+ # TODO(dmu) MEDIUM: Consider putting all these functions into a class, so we do not have to pass
82
+ # `authorization_headers` into each function
83
+ async def _resolve_workflow_id(workflow_id_or_name: Union[int, str], headers):
84
+ if isinstance(workflow_id_or_name, int):
85
+ return workflow_id_or_name
86
+
87
+ # TODO(dmu) LOW: Should we warn user here to avoid using workflow names in favor of workflow id?
88
+ async with _make_httpx_client() as client:
89
+ # TODO(dmu) MEDIUM: Generalize the way we make async calls to PromptLayer API and reuse it everywhere
90
+ response = await client.get(f"{URL_API_PROMPTLAYER}/workflows/{workflow_id_or_name}", headers=headers)
91
+ if RAISE_FOR_STATUS:
92
+ response.raise_for_status()
93
+ elif response.status_code != 200:
94
+ raise_on_bad_response(response, "PromptLayer had the following error while running your workflow")
95
+
96
+ return response.json()["workflow"]["id"]
97
+
98
+
99
+ async def _get_ably_token(channel_name, authentication_headers):
100
+ try:
101
+ async with _make_httpx_client() as client:
102
+ response = await client.post(
103
+ f"{URL_API_PROMPTLAYER}/ws-token-request-library",
104
+ headers=authentication_headers,
105
+ params={"capability": channel_name},
106
+ )
107
+ if RAISE_FOR_STATUS:
108
+ response.raise_for_status()
109
+ elif response.status_code != 201:
110
+ raise_on_bad_response(
111
+ response,
112
+ "PromptLayer had the following error while getting WebSocket token",
113
+ )
114
+ return response.json()["token_details"]["token"]
115
+ except Exception as ex:
116
+ error_message = f"Failed to get WebSocket token: {ex}"
117
+ print(error_message) # TODO(dmu) MEDIUM: Remove prints in favor of logging
118
+ logger.exception(error_message)
119
+ if RERAISE_ORIGINAL_EXCEPTION:
120
+ raise
121
+ else:
122
+ raise Exception(error_message)
123
+
124
+
125
+ def _make_message_listener(results_future, execution_id_future, return_all_outputs, headers):
126
+ # We need this function to be mocked by unittests
52
127
  async def message_listener(message: Message):
53
- if message.name != SET_WORKFLOW_COMPLETE_MESSAGE: # TODO(dmu) LOW: Do we really need this check?
54
- return
128
+ if results_future.cancelled() or message.name != SET_WORKFLOW_COMPLETE_MESSAGE:
129
+ return # TODO(dmu) LOW: Do we really need this check?
55
130
 
131
+ execution_id = await asyncio.wait_for(execution_id_future, _get_http_timeout() * 1.1)
56
132
  message_data = json.loads(message.data)
57
- result_code = message_data.get("result_code")
58
- if result_code in (FinalOutputCode.OK.value, None):
133
+ if message_data["workflow_version_execution_id"] != execution_id:
134
+ return
135
+
136
+ if (result_code := message_data.get("result_code")) in (FinalOutputCode.OK.value, None):
59
137
  results = message_data["final_output"]
60
138
  elif result_code == FinalOutputCode.EXCEEDS_SIZE_LIMIT.value:
61
139
  results = await _get_final_output(execution_id, return_all_outputs, headers=headers)
@@ -67,87 +145,115 @@ def _make_message_listener(results_future, execution_id, return_all_outputs, hea
67
145
  return message_listener
68
146
 
69
147
 
70
- async def _wait_for_workflow_completion(*, token, channel_name, execution_id, return_all_outputs, headers, timeout):
71
- results = asyncio.Future()
72
- message_listener = _make_message_listener(results, execution_id, return_all_outputs, headers)
73
-
74
- client = AblyRealtime(token=token)
75
- channel = client.channels.get(channel_name)
148
+ async def _subscribe_to_workflow_completion_channel(channel, execution_id_future, return_all_outputs, headers):
149
+ results_future = asyncio.Future()
150
+ message_listener = _make_message_listener(results_future, execution_id_future, return_all_outputs, headers)
76
151
  await channel.subscribe(SET_WORKFLOW_COMPLETE_MESSAGE, message_listener)
77
- try:
78
- return await asyncio.wait_for(results, timeout)
79
- except asyncio.TimeoutError as ex:
80
- raise Exception("Workflow execution did not complete properly") from ex
81
- finally:
82
- channel.unsubscribe(SET_WORKFLOW_COMPLETE_MESSAGE, message_listener)
83
- await client.close()
152
+ return results_future, message_listener
84
153
 
85
154
 
86
- async def arun_workflow_request(
155
+ async def _post_workflow_id_run(
87
156
  *,
88
- workflow_name: str,
157
+ authentication_headers,
158
+ workflow_id,
89
159
  input_variables: Dict[str, Any],
90
- metadata: Optional[Dict[str, Any]] = None,
91
- workflow_label_name: Optional[str] = None,
92
- workflow_version_number: Optional[int] = None,
93
- api_key: str,
94
- return_all_outputs: Optional[bool] = False,
95
- timeout: Optional[int] = 3600,
160
+ metadata: Dict[str, Any],
161
+ workflow_label_name: str,
162
+ workflow_version_number: int,
163
+ return_all_outputs: bool,
164
+ channel_name_suffix: str,
165
+ _url_template: str = WORKFLOW_RUN_URL_TEMPLATE,
96
166
  ):
167
+ url = _url_template.format(base_url=URL_API_PROMPTLAYER, workflow_id=workflow_id)
97
168
  payload = {
98
169
  "input_variables": input_variables,
99
170
  "metadata": metadata,
100
171
  "workflow_label_name": workflow_label_name,
101
172
  "workflow_version_number": workflow_version_number,
102
173
  "return_all_outputs": return_all_outputs,
174
+ "channel_name_suffix": channel_name_suffix,
103
175
  }
104
- headers = {"X-API-KEY": api_key}
105
176
  try:
106
- async with httpx.AsyncClient() as client:
107
- response = await client.post(WORKFLOWS_RUN_URL.format(workflow_name), json=payload, headers=headers)
108
- if response.status_code != 201:
109
- raise_on_bad_response(
110
- response,
111
- "PromptLayer had the following error while running your workflow",
112
- )
177
+ async with _make_httpx_client() as client:
178
+ response = await client.post(url, json=payload, headers=authentication_headers)
179
+ if RAISE_FOR_STATUS:
180
+ response.raise_for_status()
181
+ elif response.status_code != 201:
182
+ raise_on_bad_response(response, "PromptLayer had the following error while running your workflow")
113
183
 
114
184
  result = response.json()
115
185
  if warning := result.get("warning"):
116
186
  print(f"WARNING: {warning}")
117
-
118
187
  except Exception as ex:
119
188
  error_message = f"Failed to run workflow: {str(ex)}"
120
- print(error_message)
121
- raise Exception(error_message)
189
+ print(error_message) # TODO(dmu) MEDIUM: Remove prints in favor of logging
190
+ logger.exception(error_message)
191
+ if RERAISE_ORIGINAL_EXCEPTION:
192
+ raise
193
+ else:
194
+ raise Exception(error_message)
195
+
196
+ return result.get("workflow_version_execution_id")
122
197
 
123
- if not (execution_id := result.get("workflow_version_execution_id")):
124
- raise Exception("No execution ID returned from workflow run")
125
198
 
126
- channel_name = f"workflow_updates:{execution_id}"
199
+ async def _wait_for_workflow_completion(channel, results_future, message_listener, timeout):
200
+ # We need this function for mocking in unittests
127
201
  try:
128
- async with httpx.AsyncClient() as client:
129
- ws_response = await client.post(
130
- WS_TOKEN_REQUEST_LIBRARY_URL, headers=headers, params={"capability": channel_name}
131
- )
132
- if ws_response.status_code != 201:
133
- raise_on_bad_response(
134
- ws_response,
135
- "PromptLayer had the following error while getting WebSocket token",
136
- )
137
- token_details = ws_response.json()["token_details"]
138
- except Exception as ex:
139
- error_message = f"Failed to get WebSocket token: {ex}"
140
- print(error_message)
141
- raise Exception(error_message) from ex
142
-
143
- return await _wait_for_workflow_completion(
144
- token=token_details["token"],
145
- channel_name=channel_name,
146
- execution_id=execution_id,
147
- return_all_outputs=return_all_outputs,
148
- headers=headers,
149
- timeout=timeout,
202
+ return await asyncio.wait_for(results_future, timeout)
203
+ except asyncio.TimeoutError:
204
+ raise Exception("Workflow execution did not complete properly")
205
+ finally:
206
+ channel.unsubscribe(SET_WORKFLOW_COMPLETE_MESSAGE, message_listener)
207
+
208
+
209
+ def _make_channel_name_suffix():
210
+ # We need this function for mocking in unittests
211
+ return uuid4().hex
212
+
213
+
214
+ async def arun_workflow_request(
215
+ *,
216
+ workflow_id_or_name: Optional[Union[int, str]] = None,
217
+ input_variables: Dict[str, Any],
218
+ metadata: Optional[Dict[str, Any]] = None,
219
+ workflow_label_name: Optional[str] = None,
220
+ workflow_version_number: Optional[int] = None,
221
+ api_key: str,
222
+ return_all_outputs: Optional[bool] = False,
223
+ timeout: Optional[int] = 3600,
224
+ # `workflow_name` deprecated, kept for backward compatibility only.
225
+ workflow_name: Optional[str] = None,
226
+ ):
227
+ headers = {"X-API-KEY": api_key}
228
+ workflow_id = await _resolve_workflow_id(
229
+ _get_workflow_workflow_id_or_name(workflow_id_or_name, workflow_name), headers
230
+ )
231
+ channel_name_suffix = _make_channel_name_suffix()
232
+ channel_name = WORKFLOW_RUN_CHANNEL_NAME_TEMPLATE.format(
233
+ workflow_id=workflow_id, channel_name_suffix=channel_name_suffix
150
234
  )
235
+ ably_token = await _get_ably_token(channel_name, headers)
236
+ async with AblyRealtime(token=ably_token) as ably_client:
237
+ # It is crucial to subscribe before running a workflow, otherwise we may miss a completion message
238
+ channel = ably_client.channels.get(channel_name)
239
+ execution_id_future = asyncio.Future()
240
+ results_future, message_listener = await _subscribe_to_workflow_completion_channel(
241
+ channel, execution_id_future, return_all_outputs, headers
242
+ )
243
+
244
+ execution_id = await _post_workflow_id_run(
245
+ authentication_headers=headers,
246
+ workflow_id=workflow_id,
247
+ input_variables=input_variables,
248
+ metadata=metadata,
249
+ workflow_label_name=workflow_label_name,
250
+ workflow_version_number=workflow_version_number,
251
+ return_all_outputs=return_all_outputs,
252
+ channel_name_suffix=channel_name_suffix,
253
+ )
254
+ execution_id_future.set_result(execution_id)
255
+
256
+ return await _wait_for_workflow_completion(channel, results_future, message_listener, timeout)
151
257
 
152
258
 
153
259
  def promptlayer_api_handler(
@@ -166,13 +272,7 @@ def promptlayer_api_handler(
166
272
  if (
167
273
  isinstance(response, types.GeneratorType)
168
274
  or isinstance(response, types.AsyncGeneratorType)
169
- or type(response).__name__
170
- in [
171
- "Stream",
172
- "AsyncStream",
173
- "AsyncMessageStreamManager",
174
- "MessageStreamManager",
175
- ]
275
+ or type(response).__name__ in ["Stream", "AsyncStream", "AsyncMessageStreamManager", "MessageStreamManager"]
176
276
  ):
177
277
  return GeneratorProxy(
178
278
  generator=response,
@@ -289,19 +389,14 @@ def promptlayer_api_request(
289
389
  )
290
390
  if not hasattr(request_response, "status_code"):
291
391
  warn_on_bad_response(
292
- request_response,
293
- "WARNING: While logging your request PromptLayer had the following issue",
392
+ request_response, "WARNING: While logging your request PromptLayer had the following issue"
294
393
  )
295
394
  elif request_response.status_code != 200:
296
395
  warn_on_bad_response(
297
- request_response,
298
- "WARNING: While logging your request PromptLayer had the following error",
396
+ request_response, "WARNING: While logging your request PromptLayer had the following error"
299
397
  )
300
398
  except Exception as e:
301
- print(
302
- f"WARNING: While logging your request PromptLayer had the following error: {e}",
303
- file=sys.stderr,
304
- )
399
+ print(f"WARNING: While logging your request PromptLayer had the following error: {e}", file=sys.stderr)
305
400
  if request_response is not None and return_pl_id:
306
401
  return request_response.json().get("request_id")
307
402
 
@@ -314,36 +409,30 @@ def track_request(**body):
314
409
  )
315
410
  if response.status_code != 200:
316
411
  warn_on_bad_response(
317
- response,
318
- f"PromptLayer had the following error while tracking your request: {response.text}",
412
+ response, f"PromptLayer had the following error while tracking your request: {response.text}"
319
413
  )
320
414
  return response.json()
321
415
  except requests.exceptions.RequestException as e:
322
- print(
323
- f"WARNING: While logging your request PromptLayer had the following error: {e}",
324
- file=sys.stderr,
325
- )
416
+ print(f"WARNING: While logging your request PromptLayer had the following error: {e}", file=sys.stderr)
326
417
  return {}
327
418
 
328
419
 
329
420
  async def atrack_request(**body: Any) -> Dict[str, Any]:
330
421
  try:
331
- async with httpx.AsyncClient() as client:
422
+ async with _make_httpx_client() as client:
332
423
  response = await client.post(
333
424
  f"{URL_API_PROMPTLAYER}/track-request",
334
425
  json=body,
335
426
  )
336
- if response.status_code != 200:
337
- warn_on_bad_response(
338
- response,
339
- f"PromptLayer had the following error while tracking your request: {response.text}",
340
- )
427
+ if RAISE_FOR_STATUS:
428
+ response.raise_for_status()
429
+ elif response.status_code != 200:
430
+ warn_on_bad_response(
431
+ response, f"PromptLayer had the following error while tracking your request: {response.text}"
432
+ )
341
433
  return response.json()
342
434
  except httpx.RequestError as e:
343
- print(
344
- f"WARNING: While logging your request PromptLayer had the following error: {e}",
345
- file=sys.stderr,
346
- )
435
+ print(f"WARNING: While logging your request PromptLayer had the following error: {e}", file=sys.stderr)
347
436
  return {}
348
437
 
349
438
 
@@ -467,9 +556,12 @@ async def apromptlayer_track_prompt(
467
556
  "label": label,
468
557
  }
469
558
  try:
470
- async with httpx.AsyncClient() as client:
559
+ async with _make_httpx_client() as client:
471
560
  response = await client.post(url, json=payload)
472
- if response.status_code != 200:
561
+
562
+ if RAISE_FOR_STATUS:
563
+ response.raise_for_status()
564
+ elif response.status_code != 200:
473
565
  warn_on_bad_response(
474
566
  response,
475
567
  "WARNING: While tracking your prompt, PromptLayer had the following error",
@@ -518,9 +610,12 @@ async def apromptlayer_track_metadata(request_id: str, metadata: Dict[str, Any],
518
610
  "api_key": api_key,
519
611
  }
520
612
  try:
521
- async with httpx.AsyncClient() as client:
613
+ async with _make_httpx_client() as client:
522
614
  response = await client.post(url, json=payload)
523
- if response.status_code != 200:
615
+
616
+ if RAISE_FOR_STATUS:
617
+ response.raise_for_status()
618
+ elif response.status_code != 200:
524
619
  warn_on_bad_response(
525
620
  response,
526
621
  "WARNING: While tracking your metadata, PromptLayer had the following error",
@@ -575,9 +670,12 @@ async def apromptlayer_track_score(
575
670
  if score_name is not None:
576
671
  data["name"] = score_name
577
672
  try:
578
- async with httpx.AsyncClient() as client:
673
+ async with _make_httpx_client() as client:
579
674
  response = await client.post(url, json=data)
580
- if response.status_code != 200:
675
+
676
+ if RAISE_FOR_STATUS:
677
+ response.raise_for_status()
678
+ elif response.status_code != 200:
581
679
  warn_on_bad_response(
582
680
  response,
583
681
  "WARNING: While tracking your score, PromptLayer had the following error",
@@ -838,14 +936,17 @@ def promptlayer_create_group(api_key: str = None):
838
936
 
839
937
  async def apromptlayer_create_group(api_key: Optional[str] = None) -> str:
840
938
  try:
841
- async with httpx.AsyncClient() as client:
939
+ async with _make_httpx_client() as client:
842
940
  response = await client.post(
843
941
  f"{URL_API_PROMPTLAYER}/create-group",
844
942
  json={
845
943
  "api_key": api_key,
846
944
  },
847
945
  )
848
- if response.status_code != 200:
946
+
947
+ if RAISE_FOR_STATUS:
948
+ response.raise_for_status()
949
+ elif response.status_code != 200:
849
950
  warn_on_bad_response(
850
951
  response,
851
952
  "WARNING: While creating your group, PromptLayer had the following error",
@@ -885,13 +986,16 @@ async def apromptlayer_track_group(request_id, group_id, api_key: str = None):
885
986
  "request_id": request_id,
886
987
  "group_id": group_id,
887
988
  }
888
- async with httpx.AsyncClient() as client:
989
+ async with _make_httpx_client() as client:
889
990
  response = await client.post(
890
991
  f"{URL_API_PROMPTLAYER}/track-group",
891
992
  headers={"X-API-KEY": api_key},
892
993
  json=payload,
893
994
  )
894
- if response.status_code != 200:
995
+
996
+ if RAISE_FOR_STATUS:
997
+ response.raise_for_status()
998
+ elif response.status_code != 200:
895
999
  warn_on_bad_response(
896
1000
  response,
897
1001
  "WARNING: While tracking your group, PromptLayer had the following error",
@@ -942,13 +1046,16 @@ async def aget_prompt_template(
942
1046
  json_body = {"api_key": api_key}
943
1047
  if params:
944
1048
  json_body.update(params)
945
- async with httpx.AsyncClient() as client:
1049
+ async with _make_httpx_client() as client:
946
1050
  response = await client.post(
947
1051
  f"{URL_API_PROMPTLAYER}/prompt-templates/{prompt_name}",
948
1052
  headers={"X-API-KEY": api_key},
949
1053
  json=json_body,
950
1054
  )
951
- if response.status_code != 200:
1055
+
1056
+ if RAISE_FOR_STATUS:
1057
+ response.raise_for_status()
1058
+ elif response.status_code != 200:
952
1059
  raise_on_bad_response(
953
1060
  response,
954
1061
  "PromptLayer had the following error while getting your prompt template",
@@ -992,7 +1099,7 @@ async def apublish_prompt_template(
992
1099
  api_key: str = None,
993
1100
  ) -> PublishPromptTemplateResponse:
994
1101
  try:
995
- async with httpx.AsyncClient() as client:
1102
+ async with _make_httpx_client() as client:
996
1103
  response = await client.post(
997
1104
  f"{URL_API_PROMPTLAYER}/rest/prompt-templates",
998
1105
  headers={"X-API-KEY": api_key},
@@ -1002,7 +1109,10 @@ async def apublish_prompt_template(
1002
1109
  "release_labels": body.get("release_labels"),
1003
1110
  },
1004
1111
  )
1005
- if response.status_code == 400:
1112
+
1113
+ if RAISE_FOR_STATUS:
1114
+ response.raise_for_status()
1115
+ elif response.status_code == 400:
1006
1116
  raise Exception(
1007
1117
  f"PromptLayer had the following error while publishing your prompt template: {response.text}"
1008
1118
  )
@@ -1039,13 +1149,16 @@ async def aget_all_prompt_templates(
1039
1149
  page: int = 1, per_page: int = 30, api_key: str = None
1040
1150
  ) -> List[ListPromptTemplateResponse]:
1041
1151
  try:
1042
- async with httpx.AsyncClient() as client:
1152
+ async with _make_httpx_client() as client:
1043
1153
  response = await client.get(
1044
1154
  f"{URL_API_PROMPTLAYER}/prompt-templates",
1045
1155
  headers={"X-API-KEY": api_key},
1046
1156
  params={"page": page, "per_page": per_page},
1047
1157
  )
1048
- if response.status_code != 200:
1158
+
1159
+ if RAISE_FOR_STATUS:
1160
+ response.raise_for_status()
1161
+ elif response.status_code != 200:
1049
1162
  raise_on_bad_response(
1050
1163
  response,
1051
1164
  "PromptLayer had the following error while getting all your prompt templates",
@@ -1556,7 +1669,7 @@ def util_log_request(api_key: str, **kwargs) -> Union[RequestLog, None]:
1556
1669
 
1557
1670
  async def autil_log_request(api_key: str, **kwargs) -> Union[RequestLog, None]:
1558
1671
  try:
1559
- async with httpx.AsyncClient() as client:
1672
+ async with _make_httpx_client() as client:
1560
1673
  response = await client.post(
1561
1674
  f"{URL_API_PROMPTLAYER}/log-request",
1562
1675
  headers={"X-API-KEY": api_key},
@@ -1814,7 +1927,7 @@ async def agoogle_completions_request(client, **kwargs):
1814
1927
  stream = kwargs.pop("stream", False)
1815
1928
  if stream:
1816
1929
  return await client.aio.models.generate_content_stream(model=model, contents=contents, config=config)
1817
- return await client.aio.models.generate_content(model=model, contents=contents, config=config)
1930
+ return await client.aio.models.generate_content(model=model, contents=contents, config=config)
1818
1931
 
1819
1932
 
1820
1933
  AMAP_TYPE_TO_GOOGLE_FUNCTION = {
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "promptlayer"
3
- version = "1.0.47"
3
+ version = "1.0.49"
4
4
  description = "PromptLayer is a platform for prompt engineering and tracks your LLM requests."
5
5
  authors = ["Magniv <hello@magniv.io>"]
6
6
  license = "Apache-2.0"
@@ -26,6 +26,7 @@ anthropic = "0.49.0"
26
26
  # TODO(dmu) MEDIUM: Upgrade to vcrpy >= 7 once it supports urllib3 >= 2.2.2
27
27
  vcrpy = "<7.0.0"
28
28
  pytest-network = "^0.0.1"
29
+ pytest-parametrize-cases = "^0.1.2"
29
30
 
30
31
  [build-system]
31
32
  requires = ["poetry-core"]
File without changes
File without changes