promptlayer 1.0.45__py3-none-any.whl → 1.0.47__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of promptlayer might be problematic.

promptlayer/__init__.py CHANGED
@@ -1,4 +1,4 @@
 from .promptlayer import AsyncPromptLayer, PromptLayer

-__version__ = "1.0.45"
+__version__ = "1.0.47"
 __all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]
promptlayer/promptlayer.py CHANGED
@@ -71,24 +71,17 @@ class PromptLayer(PromptLayerMixin):
         if name == "openai":
             import openai as openai_module

-            openai = PromptLayerBase(
-                openai_module,
-                function_name="openai",
-                api_key=self.api_key,
-                tracer=self.tracer,
-            )
-            return openai
+            return PromptLayerBase(openai_module, function_name="openai", api_key=self.api_key, tracer=self.tracer)
         elif name == "anthropic":
             import anthropic as anthropic_module

-            anthropic = PromptLayerBase(
+            return PromptLayerBase(
                 anthropic_module,
                 function_name="anthropic",
                 provider_type="anthropic",
                 api_key=self.api_key,
                 tracer=self.tracer,
             )
-            return anthropic
         else:
             raise AttributeError(f"module {__name__} has no attribute {name}")
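For context, the __getattr__ hook in the hunk above lazily wraps the provider SDK modules in a PromptLayerBase proxy. A minimal usage sketch, hedged: only the openai/anthropic attribute names and the PromptLayerBase wrapping are grounded in the hunk, the client construction is illustrative.

    from promptlayer import PromptLayer

    # Illustrative only: attribute access routes through __getattr__ above
    # and returns a PromptLayerBase proxy around the imported provider module.
    client = PromptLayer(api_key="pl_...")  # placeholder API key
    openai_proxy = client.openai        # wraps the openai module
    anthropic_proxy = client.anthropic  # wraps anthropic with provider_type="anthropic"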
@@ -218,7 +211,7 @@ class PromptLayer(PromptLayerMixin):
             "prompt_name": prompt_name,
             "prompt_version": prompt_version,
             "prompt_release_label": prompt_release_label,
-            "input_variables": input_variables,
+            "input_variables": input_variables or {},
             "model_parameter_overrides": model_parameter_overrides,
             "tags": tags,
             "metadata": metadata,
@@ -290,8 +283,8 @@ class PromptLayer(PromptLayerMixin):
                 raise Exception(json.dumps(results, indent=4))

             return results
-        except Exception as e:
-            raise Exception(f"Error running workflow: {str(e)}")
+        except Exception as ex:
+            raise Exception(f"Error running workflow: {str(ex)}") from ex

     def log_request(
         self,
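The switch to raise ... from ex chains the original exception instead of discarding it. A minimal, self-contained sketch of the difference (names hypothetical):

    def run():
        try:
            {}["missing"]  # any failing operation
        except Exception as ex:
            # "from ex" sets __cause__, so tracebacks show the KeyError too
            raise Exception(f"Error running workflow: {ex}") from ex

    try:
        run()
    except Exception as outer:
        assert isinstance(outer.__cause__, KeyError)  # chained cause preserved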
promptlayer/utils.py CHANGED
@@ -26,6 +26,61 @@ from promptlayer.types.prompt_template import (
 )

 URL_API_PROMPTLAYER = os.environ.setdefault("URL_API_PROMPTLAYER", "https://api.promptlayer.com")
+WORKFLOWS_RUN_URL = URL_API_PROMPTLAYER + "/workflows/{}/run"
+WS_TOKEN_REQUEST_LIBRARY_URL = URL_API_PROMPTLAYER + "/ws-token-request-library"
+
+SET_WORKFLOW_COMPLETE_MESSAGE = "SET_WORKFLOW_COMPLETE"
+
+
+class FinalOutputCode(Enum):
+    OK = "OK"
+    EXCEEDS_SIZE_LIMIT = "EXCEEDS_SIZE_LIMIT"
+
+
+async def _get_final_output(execution_id: int, return_all_outputs: bool, *, headers: Dict[str, str]) -> Dict[str, Any]:
+    async with httpx.AsyncClient() as client:
+        response = await client.get(
+            f"{URL_API_PROMPTLAYER}/workflow-version-execution-results",
+            headers=headers,
+            params={"workflow_version_execution_id": execution_id, "return_all_outputs": return_all_outputs},
+        )
+        response.raise_for_status()
+        return response.json()
+
+
+def _make_message_listener(results_future, execution_id, return_all_outputs, headers):
+    async def message_listener(message: Message):
+        if message.name != SET_WORKFLOW_COMPLETE_MESSAGE:  # TODO(dmu) LOW: Do we really need this check?
+            return
+
+        message_data = json.loads(message.data)
+        result_code = message_data.get("result_code")
+        if result_code in (FinalOutputCode.OK.value, None):
+            results = message_data["final_output"]
+        elif result_code == FinalOutputCode.EXCEEDS_SIZE_LIMIT.value:
+            results = await _get_final_output(execution_id, return_all_outputs, headers=headers)
+        else:
+            raise NotImplementedError(f"Unsupported final output code: {result_code}")
+
+        results_future.set_result(results)
+
+    return message_listener
+
+
+async def _wait_for_workflow_completion(*, token, channel_name, execution_id, return_all_outputs, headers, timeout):
+    results = asyncio.Future()
+    message_listener = _make_message_listener(results, execution_id, return_all_outputs, headers)
+
+    client = AblyRealtime(token=token)
+    channel = client.channels.get(channel_name)
+    await channel.subscribe(SET_WORKFLOW_COMPLETE_MESSAGE, message_listener)
+    try:
+        return await asyncio.wait_for(results, timeout)
+    except asyncio.TimeoutError as ex:
+        raise Exception("Workflow execution did not complete properly") from ex
+    finally:
+        channel.unsubscribe(SET_WORKFLOW_COMPLETE_MESSAGE, message_listener)
+        await client.close()


 async def arun_workflow_request(
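The helpers added above replace the previous inline Event-based listener: the Ably message listener now resolves an asyncio.Future, and when the realtime message reports EXCEEDS_SIZE_LIMIT the final output is fetched over HTTP instead. A minimal sketch of the Future-resolution pattern, stripped of the Ably and HTTP details (all names hypothetical):

    import asyncio

    async def demo():
        results_future = asyncio.Future()

        def on_complete(data):  # stand-in for the subscribed message listener
            if not results_future.done():
                results_future.set_result(data)

        # Simulate the completion message arriving a moment later
        asyncio.get_running_loop().call_later(0.1, on_complete, {"status": "ok"})
        return await asyncio.wait_for(results_future, timeout=5)

    print(asyncio.run(demo()))  # {'status': 'ok'}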
@@ -46,13 +101,10 @@ async def arun_workflow_request(
         "workflow_version_number": workflow_version_number,
         "return_all_outputs": return_all_outputs,
     }
-
-    url = f"{URL_API_PROMPTLAYER}/workflows/{workflow_name}/run"
     headers = {"X-API-KEY": api_key}
-
     try:
         async with httpx.AsyncClient() as client:
-            response = await client.post(url, json=payload, headers=headers)
+            response = await client.post(WORKFLOWS_RUN_URL.format(workflow_name), json=payload, headers=headers)
             if response.status_code != 201:
                 raise_on_bad_response(
                     response,
@@ -60,28 +112,22 @@ async def arun_workflow_request(
                 )

         result = response.json()
-        warning = result.get("warning")
-        if warning:
+        if warning := result.get("warning"):
             print(f"WARNING: {warning}")

-    except Exception as e:
-        error_message = f"Failed to run workflow: {str(e)}"
+    except Exception as ex:
+        error_message = f"Failed to run workflow: {str(ex)}"
         print(error_message)
         raise Exception(error_message)

-    execution_id = result.get("workflow_version_execution_id")
-    if not execution_id:
+    if not (execution_id := result.get("workflow_version_execution_id")):
         raise Exception("No execution ID returned from workflow run")

     channel_name = f"workflow_updates:{execution_id}"
-
-    # Get WebSocket token
     try:
         async with httpx.AsyncClient() as client:
             ws_response = await client.post(
-                f"{URL_API_PROMPTLAYER}/ws-token-request-library",
-                headers=headers,
-                params={"capability": channel_name},
+                WS_TOKEN_REQUEST_LIBRARY_URL, headers=headers, params={"capability": channel_name}
             )
             if ws_response.status_code != 201:
                 raise_on_bad_response(
@@ -89,44 +135,19 @@ async def arun_workflow_request(
                     "PromptLayer had the following error while getting WebSocket token",
                 )
             token_details = ws_response.json()["token_details"]
-    except Exception as e:
-        error_message = f"Failed to get WebSocket token: {e}"
+    except Exception as ex:
+        error_message = f"Failed to get WebSocket token: {ex}"
         print(error_message)
-        raise Exception(error_message)
-
-    # Initialize Ably client
-    ably_client = AblyRealtime(token=token_details["token"])
-
-    # Subscribe to the channel named after the execution ID
-    channel = ably_client.channels.get(channel_name)
-
-    results = None
-    message_received_event = asyncio.Event()
-
-    async def message_listener(message: Message):
-        nonlocal results
-
-        if message.name == "SET_WORKFLOW_COMPLETE":
-            message_data = json.loads(message.data)
-            results = message_data["final_output"]
-            message_received_event.set()
-
-    # Subscribe to the channel
-    await channel.subscribe("SET_WORKFLOW_COMPLETE", message_listener)
-
-    # Wait for the message or timeout
-    try:
-        await asyncio.wait_for(message_received_event.wait(), timeout)
-    except asyncio.TimeoutError:
-        channel.unsubscribe("SET_WORKFLOW_COMPLETE", message_listener)
-        await ably_client.close()
-        raise Exception("Workflow execution did not complete properly")
-
-    # Unsubscribe from the channel and close the client
-    channel.unsubscribe("SET_WORKFLOW_COMPLETE", message_listener)
-    await ably_client.close()
-
-    return results
+        raise Exception(error_message) from ex
+
+    return await _wait_for_workflow_completion(
+        token=token_details["token"],
+        channel_name=channel_name,
+        execution_id=execution_id,
+        return_all_outputs=return_all_outputs,
+        headers=headers,
+        timeout=timeout,
+    )


 def promptlayer_api_handler(
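A hedged usage sketch of the refactored arun_workflow_request: only the parameters visible in the hunks above (workflow name, version number, return_all_outputs, timeout, API key) are grounded in the diff, and the exact signature may differ.

    import asyncio
    from promptlayer.utils import arun_workflow_request

    async def main():
        results = await arun_workflow_request(
            workflow_name="my-workflow",   # hypothetical workflow
            api_key="pl_...",              # placeholder API key
            workflow_version_number=None,  # assumed: latest version
            return_all_outputs=False,
            timeout=120,
        )
        print(results)

    # asyncio.run(main())  # requires a valid PromptLayer API key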
@@ -1718,7 +1739,7 @@ def google_chat_request(client, **kwargs):
     history = [Content(**item) for item in kwargs.get("history", [])]
     generation_config = kwargs.get("generation_config", {})
     chat = client.chats.create(model=model, history=history, config=generation_config)
-    last_message = history[-1] if history else None
+    last_message = history[-1].parts[0] if history else None
     if stream:
         return chat.send_message_stream(message=last_message)
     return chat.send_message(message=last_message)
@@ -1780,7 +1801,7 @@ async def agoogle_chat_request(client, **kwargs):
     history = [Content(**item) for item in kwargs.get("history", [])]
     generation_config = kwargs.get("generation_config", {})
     chat = client.aio.chats.create(model=model, history=history, config=generation_config)
-    last_message = history[-1] if history else None
+    last_message = history[-1].parts[0] if history else None
     if stream:
         return await chat.send_message_stream(message=last_message)
     return await chat.send_message(message=last_message)
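Both google chat hunks above now pass history[-1].parts[0] (a single Part) rather than the whole Content object to send_message. A minimal sketch of the assumed google-genai shapes (the history entry below is illustrative):

    from google.genai.types import Content, Part

    history = [Content(role="user", parts=[Part(text="Hello!")])]

    # Mirrors the change above: extract the Part, not the Content wrapper
    last_message = history[-1].parts[0] if history else None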
@@ -1813,7 +1834,7 @@ async def agoogle_request(request: GetPromptTemplateResponse, **kwargs):
 async def amap_google_stream_response(generator: AsyncIterable[Any]):
     from google.genai.chats import GenerateContentResponse

-    response = GenerateContentResponse()
+    GenerateContentResponse()
     content = ""
     async for result in generator:
         content = f"{content}{result.candidates[0].content.parts[0].text}"
promptlayer-{1.0.45 → 1.0.47}.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: promptlayer
-Version: 1.0.45
+Version: 1.0.47
 Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
 License: Apache-2.0
 Author: Magniv
@@ -15,7 +15,6 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: ably (>=2.0.11,<3.0.0)
 Requires-Dist: aiohttp (>=3.10.10,<4.0.0)
-Requires-Dist: anthropic (==0.49.0)
 Requires-Dist: httpx (>=0.28.1,<0.29.0)
 Requires-Dist: nest-asyncio (>=1.6.0,<2.0.0)
 Requires-Dist: opentelemetry-api (>=1.26.0,<2.0.0)
promptlayer-{1.0.45 → 1.0.47}.dist-info/RECORD RENAMED
@@ -1,7 +1,7 @@
-promptlayer/__init__.py,sha256=lF0AT1-uoLyJOLux4SRaLhkoajT2xSDNpe057cQahMA,140
+promptlayer/__init__.py,sha256=hzHTt786KmdtfDauPf3Q1SF9wxA7wLTZsQ-wgZ9B3JI,140
 promptlayer/groups/__init__.py,sha256=xhOAolLUBkr76ZHvJr29OwjCIk1V9qKQXjZCuyTJUIY,429
 promptlayer/groups/groups.py,sha256=YPROicy-TzpkrpA8vOpZS2lwvJ6VRtlbQ1S2oT1N0vM,338
-promptlayer/promptlayer.py,sha256=j-2quL72MG0N5XzG55ZD8BD1zckVZROx5W8RYI4Y33Y,20776
+promptlayer/promptlayer.py,sha256=RRsZ2vFrLcR1Tb72HDdTpZd2CcXIpzQnZuZrY9eYbyo,20651
 promptlayer/promptlayer_base.py,sha256=jOgXzNZlV1LKOOsXSSAOgn8o4hXn_EV0oY9Nf3Bsu_s,6872
 promptlayer/promptlayer_mixins.py,sha256=Plx_zRWOzHxk49Rr5jv6Yq0-mrRfgwzxeU6wS4iahLE,10897
 promptlayer/span_exporter.py,sha256=wxJoYHsaS0zrMpYShCeHGChWI06DeNHQfeFf5ZRU498,2508
@@ -11,8 +11,8 @@ promptlayer/track/track.py,sha256=A-awcYwsSwxktrlCMchy8NITIquwxU1UXbgLZMwqrA0,31
 promptlayer/types/__init__.py,sha256=xJcvQuOk91ZBBePb40-1FDNDKYrZoH5lPE2q6_UhprM,111
 promptlayer/types/prompt_template.py,sha256=GoYSorgBmUgvtyXaGAOv0KgVC61Llzn8bND6PF1fW50,4929
 promptlayer/types/request_log.py,sha256=xU6bcxQar6GaBOJlgZTavXUV3FjE8sF_nSjPu4Ya_00,174
-promptlayer/utils.py,sha256=ojLyY0sQ8aVm2w1YRis3gQyKvdpnpMVD_wytnP6Dj8I,62824
-promptlayer-1.0.45.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-promptlayer-1.0.45.dist-info/METADATA,sha256=JYUScUnsix7vjwDzV3zWEbiBoOZYqk7NnIOGSj5a0yU,4855
-promptlayer-1.0.45.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
-promptlayer-1.0.45.dist-info/RECORD,,
+promptlayer/utils.py,sha256=BECM_zD1uZB2vopru8ykJdFbeDMozWHWzJeVz5Svht0,64129
+promptlayer-1.0.47.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+promptlayer-1.0.47.dist-info/METADATA,sha256=mtGVfTXcsqTKODoe7XlJyQPBa14BlrjwUn9DX_R9gcc,4819
+promptlayer-1.0.47.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+promptlayer-1.0.47.dist-info/RECORD,,
promptlayer-{1.0.45 → 1.0.47}.dist-info/WHEEL RENAMED
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 2.1.1
+Generator: poetry-core 2.1.2
 Root-Is-Purelib: true
 Tag: py3-none-any