promptlayer 1.0.35__py3-none-any.whl → 1.0.78__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,6 +2,7 @@ import datetime
 import inspect
 import re
 
+from promptlayer import exceptions as _exceptions
 from promptlayer.utils import async_wrapper, promptlayer_api_handler
 
 
@@ -13,47 +14,41 @@ class PromptLayerBase(object):
         "_provider_type",
         "_api_key",
         "_tracer",
+        "_base_url",
     ]
 
-    def __init__(
-        self, obj, function_name="", provider_type="openai", api_key=None, tracer=None
-    ):
+    def __init__(self, api_key: str, base_url: str, obj, function_name="", provider_type="openai", tracer=None):
        object.__setattr__(self, "_obj", obj)
        object.__setattr__(self, "_function_name", function_name)
        object.__setattr__(self, "_provider_type", provider_type)
        object.__setattr__(self, "_api_key", api_key)
        object.__setattr__(self, "_tracer", tracer)
+        object.__setattr__(self, "_base_url", base_url)
 
     def __getattr__(self, name):
         attr = getattr(object.__getattribute__(self, "_obj"), name)
 
         if (
             name != "count_tokens"  # fix for anthropic count_tokens
-            and not re.match(
-                r"<class 'anthropic\..*Error'>", str(attr)
-            )  # fix for anthropic errors
-            and not re.match(
-                r"<class 'openai\..*Error'>", str(attr)
-            )  # fix for openai errors
+            and not re.match(r"<class 'anthropic\..*Error'>", str(attr))  # fix for anthropic errors
+            and not re.match(r"<class 'openai\..*Error'>", str(attr))  # fix for openai errors
             and (
                 inspect.isclass(attr)
                 or inspect.isfunction(attr)
                 or inspect.ismethod(attr)
-                or str(type(attr))
-                == "<class 'anthropic.resources.completions.Completions'>"
-                or str(type(attr))
-                == "<class 'anthropic.resources.completions.AsyncCompletions'>"
-                or str(type(attr)) == "<class 'anthropic.resources.messages.Messages'>"
-                or str(type(attr))
-                == "<class 'anthropic.resources.messages.AsyncMessages'>"
+                or str(type(attr)) == "<class 'anthropic.resources.completions.Completions'>"
+                or str(type(attr)) == "<class 'anthropic.resources.completions.AsyncCompletions'>"
+                or str(type(attr)) == "<class 'anthropic.resources.messages.messages.Messages'>"
+                or str(type(attr)) == "<class 'anthropic.resources.messages.messages.AsyncMessages'>"
                 or re.match(r"<class 'openai\.resources.*'>", str(type(attr)))
             )
         ):
             return PromptLayerBase(
+                object.__getattribute__(self, "_api_key"),
+                object.__getattribute__(self, "_base_url"),
                 attr,
-                function_name=f'{object.__getattribute__(self, "_function_name")}.{name}',
+                function_name=f"{object.__getattribute__(self, '_function_name')}.{name}",
                 provider_type=object.__getattribute__(self, "_provider_type"),
-                api_key=object.__getattribute__(self, "_api_key"),
                 tracer=object.__getattribute__(self, "_tracer"),
             )
         return attr
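
Note: PromptLayerBase.__init__ now takes api_key and base_url as required leading positional parameters (the old optional api_key=None keyword is gone), and __getattr__ threads both values into every nested proxy. A minimal sketch of constructing the proxy under the new signature; the key and URL values are illustrative assumptions, not taken from this diff:

    import openai as openai_module

    # Hypothetical values; argument order follows the new __init__ above.
    proxied = PromptLayerBase(
        "pl_XXXX",                      # api_key: now required, positional
        "https://api.promptlayer.com",  # base_url: new in 1.0.78 (assumed endpoint)
        openai_module,
        function_name="openai",
    )
    # Attribute access returns nested PromptLayerBase proxies that carry the
    # same api_key and base_url, e.g. proxied.chat.completions.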
@@ -67,7 +62,7 @@ class PromptLayerBase(object):
     def __call__(self, *args, **kwargs):
         tags = kwargs.pop("pl_tags", None)
         if tags is not None and not isinstance(tags, list):
-            raise Exception("pl_tags must be a list of strings.")
+            raise _exceptions.PromptLayerValidationError("pl_tags must be a list of strings.", response=None, body=None)
 
         return_pl_id = kwargs.pop("return_pl_id", False)
         request_start_time = datetime.datetime.now().timestamp()
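
Note: a non-list pl_tags now raises a typed PromptLayerValidationError (constructed here with response=None and body=None) instead of a bare Exception, so callers can catch it specifically. A hedged sketch, assuming the class is exported from promptlayer.exceptions as the import at the top of this file suggests, and given a PromptLayerBase-wrapped client (proxied is hypothetical):

    from promptlayer import exceptions

    try:
        proxied.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": "hi"}],
            pl_tags="not-a-list",  # must be a list of strings
        )
    except exceptions.PromptLayerValidationError as err:
        # Typed replacement for the old bare Exception.
        print(f"invalid pl_tags: {err}")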
@@ -77,23 +72,18 @@ class PromptLayerBase(object):
 
         if tracer:
             with tracer.start_as_current_span(function_name) as llm_request_span:
-                llm_request_span_id = hex(llm_request_span.context.span_id)[2:].zfill(
-                    16
-                )
-                llm_request_span.set_attribute(
-                    "provider", object.__getattribute__(self, "_provider_type")
-                )
+                llm_request_span_id = hex(llm_request_span.context.span_id)[2:].zfill(16)
+                llm_request_span.set_attribute("provider", object.__getattribute__(self, "_provider_type"))
                 llm_request_span.set_attribute("function_name", function_name)
-                llm_request_span.set_attribute(
-                    "function_input", str({"args": args, "kwargs": kwargs})
-                )
+                llm_request_span.set_attribute("function_input", str({"args": args, "kwargs": kwargs}))
 
                 if inspect.isclass(function_object):
                     result = PromptLayerBase(
+                        object.__getattribute__(self, "_api_key"),
+                        object.__getattribute__(self, "_base_url"),
                         function_object(*args, **kwargs),
                         function_name=function_name,
                         provider_type=object.__getattribute__(self, "_provider_type"),
-                        api_key=object.__getattribute__(self, "_api_key"),
                         tracer=tracer,
                     )
                     llm_request_span.set_attribute("function_output", str(result))
@@ -101,17 +91,16 @@ class PromptLayerBase(object):
 
                 function_response = function_object(*args, **kwargs)
 
-                if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(
-                    function_response
-                ):
+                if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(function_response):
                     return async_wrapper(
+                        object.__getattribute__(self, "_api_key"),
+                        object.__getattribute__(self, "_base_url"),
                         function_response,
                         return_pl_id,
                         request_start_time,
                         function_name,
                         object.__getattribute__(self, "_provider_type"),
                         tags,
-                        api_key=object.__getattribute__(self, "_api_key"),
                         llm_request_span_id=llm_request_span_id,
                         tracer=tracer,  # Pass the tracer to async_wrapper
                         *args,
@@ -120,6 +109,8 @@ class PromptLayerBase(object):
 
                 request_end_time = datetime.datetime.now().timestamp()
                 result = promptlayer_api_handler(
+                    object.__getattribute__(self, "_api_key"),
+                    object.__getattribute__(self, "_base_url"),
                     function_name,
                     object.__getattribute__(self, "_provider_type"),
                     args,
@@ -128,7 +119,6 @@ class PromptLayerBase(object):
                     function_response,
                     request_start_time,
                     request_end_time,
-                    object.__getattribute__(self, "_api_key"),
                     return_pl_id=return_pl_id,
                     llm_request_span_id=llm_request_span_id,
                 )
@@ -138,31 +128,33 @@ class PromptLayerBase(object):
         # Without tracing
         if inspect.isclass(function_object):
             return PromptLayerBase(
+                object.__getattribute__(self, "_api_key"),
+                object.__getattribute__(self, "_base_url"),
                 function_object(*args, **kwargs),
                 function_name=function_name,
                 provider_type=object.__getattribute__(self, "_provider_type"),
-                api_key=object.__getattribute__(self, "_api_key"),
             )
 
         function_response = function_object(*args, **kwargs)
 
-        if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(
-            function_response
-        ):
+        if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(function_response):
             return async_wrapper(
+                object.__getattribute__(self, "_api_key"),
+                object.__getattribute__(self, "_base_url"),
                 function_response,
                 return_pl_id,
                 request_start_time,
                 function_name,
                 object.__getattribute__(self, "_provider_type"),
                 tags,
-                api_key=object.__getattribute__(self, "_api_key"),
                 *args,
                 **kwargs,
             )
 
         request_end_time = datetime.datetime.now().timestamp()
         return promptlayer_api_handler(
+            object.__getattribute__(self, "_api_key"),
+            object.__getattribute__(self, "_base_url"),
             function_name,
             object.__getattribute__(self, "_provider_type"),
             args,
@@ -171,6 +163,5 @@ class PromptLayerBase(object):
             function_response,
             request_start_time,
             request_end_time,
-            object.__getattribute__(self, "_api_key"),
             return_pl_id=return_pl_id,
         )
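
Note: the same re-threading applies to async_wrapper and promptlayer_api_handler: both now receive api_key and base_url as their first two positional arguments instead of a trailing positional or api_key= keyword. The shape implied by the call sites in this diff (an inference; parameter names are assumptions, not the actual promptlayer.utils definition):

    # Inferred from the call sites above, not from promptlayer.utils itself.
    def promptlayer_api_handler(
        api_key,    # moved to the front
        base_url,   # new in 1.0.78
        function_name,
        provider_type,
        args,
        kwargs,
        tags,
        response,
        request_start_time,
        request_end_time,
        return_pl_id=False,
        llm_request_span_id=None,
    ):
        ...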
@@ -2,7 +2,7 @@ import asyncio
 import datetime
 from copy import deepcopy
 from functools import wraps
-from typing import Dict, Union
+from typing import Any, Dict, Union
 
 from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.trace import TracerProvider
@@ -10,29 +10,47 @@ from opentelemetry.sdk.trace.export import BatchSpanProcessor
 from opentelemetry.semconv.resource import ResourceAttributes
 
 from promptlayer.span_exporter import PromptLayerSpanExporter
-from promptlayer.utils import (
-    aanthropic_request,
+from promptlayer.streaming import (
     aanthropic_stream_completion,
     aanthropic_stream_message,
-    aazure_openai_request,
-    amistral_request,
+    abedrock_stream_message,
+    agoogle_stream_chat,
+    agoogle_stream_completion,
     amistral_stream_chat,
-    anthropic_request,
     anthropic_stream_completion,
     anthropic_stream_message,
-    aopenai_request,
+    aopenai_responses_stream_chat,
     aopenai_stream_chat,
     aopenai_stream_completion,
-    azure_openai_request,
-    mistral_request,
+    bedrock_stream_message,
+    google_stream_chat,
+    google_stream_completion,
     mistral_stream_chat,
-    openai_request,
+    openai_responses_stream_chat,
     openai_stream_chat,
     openai_stream_completion,
 )
+from promptlayer.utils import (
+    aamazon_bedrock_request,
+    aanthropic_bedrock_request,
+    aanthropic_request,
+    aazure_openai_request,
+    agoogle_request,
+    amazon_bedrock_request,
+    amistral_request,
+    anthropic_bedrock_request,
+    anthropic_request,
+    aopenai_request,
+    avertexai_request,
+    azure_openai_request,
+    google_request,
+    mistral_request,
+    openai_request,
+    vertexai_request,
+)
 
 MAP_PROVIDER_TO_FUNCTION_NAME = {
-    "openai": {
+    "openai:chat-completions": {
         "chat": {
             "function_name": "openai.chat.completions.create",
             "stream_function": openai_stream_chat,
@@ -42,6 +60,16 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": openai_stream_completion,
         },
     },
+    "openai:responses": {
+        "chat": {
+            "function_name": "openai.responses.create",
+            "stream_function": openai_responses_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.responses.create",
+            "stream_function": openai_responses_stream_chat,
+        },
+    },
     "anthropic": {
         "chat": {
             "function_name": "anthropic.messages.create",
@@ -52,7 +80,7 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": anthropic_stream_completion,
         },
     },
-    "openai.azure": {
+    "openai.azure:chat-completions": {
         "chat": {
             "function_name": "openai.AzureOpenAI.chat.completions.create",
             "stream_function": openai_stream_chat,
@@ -62,6 +90,16 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": openai_stream_completion,
         },
     },
+    "openai.azure:responses": {
+        "chat": {
+            "function_name": "openai.AzureOpenAI.responses.create",
+            "stream_function": openai_responses_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.AzureOpenAI.responses.create",
+            "stream_function": openai_responses_stream_chat,
+        },
+    },
     "mistral": {
         "chat": {
             "function_name": "mistral.client.chat",
@@ -72,18 +110,52 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": None,
         },
     },
+    "google": {
+        "chat": {
+            "function_name": "google.convo.send_message",
+            "stream_function": google_stream_chat,
+        },
+        "completion": {
+            "function_name": "google.model.generate_content",
+            "stream_function": google_stream_completion,
+        },
+    },
+    "amazon.bedrock": {
+        "chat": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": bedrock_stream_message,
+        },
+        "completion": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": bedrock_stream_message,
+        },
+    },
+    "anthropic.bedrock": {
+        "chat": {
+            "function_name": "anthropic.messages.create",
+            "stream_function": anthropic_stream_message,
+        },
+        "completion": {
+            "function_name": "anthropic.completions.create",
+            "stream_function": anthropic_stream_completion,
+        },
+    },
 }
 
 
 MAP_PROVIDER_TO_FUNCTION = {
-    "openai": openai_request,
     "anthropic": anthropic_request,
-    "openai.azure": azure_openai_request,
+    "google": google_request,
     "mistral": mistral_request,
+    "openai": openai_request,
+    "openai.azure": azure_openai_request,
+    "vertexai": vertexai_request,
+    "amazon.bedrock": amazon_bedrock_request,
+    "anthropic.bedrock": anthropic_bedrock_request,
 }
 
 AMAP_PROVIDER_TO_FUNCTION_NAME = {
-    "openai": {
+    "openai:chat-completions": {
         "chat": {
             "function_name": "openai.chat.completions.create",
             "stream_function": aopenai_stream_chat,
@@ -93,6 +165,16 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": aopenai_stream_completion,
         },
     },
+    "openai:responses": {
+        "chat": {
+            "function_name": "openai.responses.create",
+            "stream_function": aopenai_responses_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.responses.create",
+            "stream_function": aopenai_responses_stream_chat,
+        },
+    },
     "anthropic": {
         "chat": {
             "function_name": "anthropic.messages.create",
@@ -103,7 +185,7 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": aanthropic_stream_completion,
         },
     },
-    "openai.azure": {
+    "openai.azure:chat-completions": {
         "chat": {
             "function_name": "openai.AzureOpenAI.chat.completions.create",
             "stream_function": aopenai_stream_chat,
@@ -113,6 +195,16 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": aopenai_stream_completion,
         },
     },
+    "openai.azure:responses": {
+        "chat": {
+            "function_name": "openai.AzureOpenAI.responses.create",
+            "stream_function": aopenai_responses_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.AzureOpenAI.responses.create",
+            "stream_function": aopenai_responses_stream_chat,
+        },
+    },
     "mistral": {
         "chat": {
             "function_name": "mistral.client.chat",
@@ -123,26 +215,60 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": None,
         },
     },
+    "google": {
+        "chat": {
+            "function_name": "google.convo.send_message",
+            "stream_function": agoogle_stream_chat,
+        },
+        "completion": {
+            "function_name": "google.model.generate_content",
+            "stream_function": agoogle_stream_completion,
+        },
+    },
+    "amazon.bedrock": {
+        "chat": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": abedrock_stream_message,
+        },
+        "completion": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": abedrock_stream_message,
+        },
+    },
+    "anthropic.bedrock": {
+        "chat": {
+            "function_name": "anthropic.messages.create",
+            "stream_function": aanthropic_stream_message,
+        },
+        "completion": {
+            "function_name": "anthropic.completions.create",
+            "stream_function": aanthropic_stream_completion,
+        },
+    },
 }
 
 
 AMAP_PROVIDER_TO_FUNCTION = {
-    "openai": aopenai_request,
     "anthropic": aanthropic_request,
-    "openai.azure": aazure_openai_request,
+    "google": agoogle_request,
     "mistral": amistral_request,
+    "openai": aopenai_request,
+    "openai.azure": aazure_openai_request,
+    "vertexai": avertexai_request,
+    "amazon.bedrock": aamazon_bedrock_request,
+    "anthropic.bedrock": aanthropic_bedrock_request,
 }
 
 
 class PromptLayerMixin:
     @staticmethod
-    def _initialize_tracer(api_key: str = None, enable_tracing: bool = False):
+    def _initialize_tracer(api_key: str, base_url: str, throw_on_error: bool, enable_tracing: bool = False):
         if enable_tracing:
-            resource = Resource(
-                attributes={ResourceAttributes.SERVICE_NAME: "prompt-layer-library"}
-            )
+            resource = Resource(attributes={ResourceAttributes.SERVICE_NAME: "prompt-layer-library"})
             tracer_provider = TracerProvider(resource=resource)
-            promptlayer_exporter = PromptLayerSpanExporter(api_key=api_key)
+            promptlayer_exporter = PromptLayerSpanExporter(
+                api_key=api_key, base_url=base_url, throw_on_error=throw_on_error
+            )
             span_processor = BatchSpanProcessor(promptlayer_exporter)
             tracer_provider.add_span_processor(span_processor)
             tracer = tracer_provider.get_tracer(__name__)
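
Note: _initialize_tracer no longer defaults api_key; callers must pass api_key, base_url, and throw_on_error, which flow into PromptLayerSpanExporter. A hedged usage sketch (the endpoint value is an assumption, and the hunk ends before the method's return statement, so no return value is captured here):

    PromptLayerMixin._initialize_tracer(
        api_key="pl_XXXX",
        base_url="https://api.promptlayer.com",  # assumed endpoint
        throw_on_error=False,
        enable_tracing=True,
    )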
@@ -152,8 +278,15 @@ class PromptLayerMixin:
 
     @staticmethod
     def _prepare_get_prompt_template_params(
-        *, prompt_version, prompt_release_label, input_variables, metadata
-    ):
+        *,
+        prompt_version: Union[int, None],
+        prompt_release_label: Union[str, None],
+        input_variables: Union[Dict[str, Any], None],
+        metadata: Union[Dict[str, str], None],
+        provider: Union[str, None] = None,
+        model: Union[str, None] = None,
+        model_parameter_overrides: Union[Dict[str, Any], None] = None,
+    ) -> Dict[str, Any]:
         params = {}
 
         if prompt_version:
@@ -164,69 +297,86 @@ class PromptLayerMixin:
             params["input_variables"] = input_variables
         if metadata:
             params["metadata_filters"] = metadata
+        if provider:
+            params["provider"] = provider
+        if model:
+            params["model"] = model
+        if model_parameter_overrides:
+            params["model_parameter_overrides"] = model_parameter_overrides
 
         return params
 
     @staticmethod
-    def _prepare_llm_request_params(
+    def _prepare_llm_data(
         *,
         prompt_blueprint,
         prompt_template,
         prompt_blueprint_model,
-        model_parameter_overrides,
         stream,
         is_async=False,
     ):
+        client_kwargs = {}
+        function_kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
+        function_kwargs["stream"] = stream
         provider = prompt_blueprint_model["provider"]
-        kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
+        api_type = prompt_blueprint_model.get("api_type", "chat-completions")
+
+        if custom_provider := prompt_blueprint.get("custom_provider"):
+            provider = custom_provider["client"]
+            client_kwargs = {
+                "api_key": custom_provider["api_key"],
+                "base_url": custom_provider["base_url"],
+            }
+        elif provider_base_url := prompt_blueprint.get("provider_base_url"):
+            client_kwargs["base_url"] = provider_base_url["url"]
+
+        if stream and provider in ["openai", "openai.azure"] and api_type == "chat-completions":
+            function_kwargs["stream_options"] = {"include_usage": True}
+
+        provider_function_name = provider
+        if provider_function_name == "vertexai":
+            if "gemini" in prompt_blueprint_model["name"]:
+                provider_function_name = "google"
+            elif "claude" in prompt_blueprint_model["name"]:
+                provider_function_name = "anthropic"
+
+        if provider_function_name in ("openai", "openai.azure"):
+            api = api_type if api_type is not None else "chat-completions"
+            provider_function_name = f"{provider_function_name}:{api}"
+
         if is_async:
-            config = AMAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
+            config = AMAP_PROVIDER_TO_FUNCTION_NAME[provider_function_name][prompt_template["type"]]
             request_function = AMAP_PROVIDER_TO_FUNCTION[provider]
         else:
-            config = MAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
+            config = MAP_PROVIDER_TO_FUNCTION_NAME[provider_function_name][prompt_template["type"]]
            request_function = MAP_PROVIDER_TO_FUNCTION[provider]
 
-        if provider_base_url := prompt_blueprint.get("provider_base_url"):
-            kwargs["base_url"] = provider_base_url["url"]
-
-        if model_parameter_overrides:
-            kwargs.update(model_parameter_overrides)
-
-        kwargs["stream"] = stream
-        if stream and provider in ["openai", "openai.azure"]:
-            kwargs["stream_options"] = {"include_usage": True}
-
         return {
             "provider": provider,
             "function_name": config["function_name"],
             "stream_function": config["stream_function"],
             "request_function": request_function,
-            "kwargs": kwargs,
+            "client_kwargs": client_kwargs,
+            "function_kwargs": function_kwargs,
             "prompt_blueprint": prompt_blueprint,
         }
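
Note: _prepare_llm_data now resolves a composite provider:api_type key for the OpenAI-style providers (and remaps vertexai by model name) before indexing the function-name maps, while the request function is still looked up under the bare provider key. A sketch of that resolution with illustrative values, mirroring the logic above:

    # Illustrative blueprint metadata.
    model_meta = {"provider": "openai", "api_type": "responses", "name": "gpt-4o"}

    provider = model_meta["provider"]                          # "openai"
    api_type = model_meta.get("api_type", "chat-completions")  # "responses"

    provider_function_name = provider
    if provider_function_name in ("openai", "openai.azure"):
        provider_function_name = f"{provider_function_name}:{api_type}"  # "openai:responses"

    config = MAP_PROVIDER_TO_FUNCTION_NAME[provider_function_name]["chat"]
    assert config["function_name"] == "openai.responses.create"
    request_function = MAP_PROVIDER_TO_FUNCTION[provider]  # keyed on bare "openai"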
 
     @staticmethod
-    def _validate_and_extract_model_from_prompt_blueprint(
-        *, prompt_blueprint, prompt_name
-    ):
+    def _validate_and_extract_model_from_prompt_blueprint(*, prompt_blueprint, prompt_name):
         if not prompt_blueprint["llm_kwargs"]:
             raise ValueError(
-                f"Prompt '{prompt_name}' does not have any LLM kwargs associated with it."
+                f"Prompt '{prompt_name}' does not have any LLM kwargs associated with it. Please set your model parameters in the registry in the PromptLayer dashboard."
             )
 
         prompt_blueprint_metadata = prompt_blueprint.get("metadata")
 
         if not prompt_blueprint_metadata:
-            raise ValueError(
-                f"Prompt '{prompt_name}' does not have any metadata associated with it."
-            )
+            raise ValueError(f"Prompt '{prompt_name}' does not have any metadata associated with it.")
 
         prompt_blueprint_model = prompt_blueprint_metadata.get("model")
 
         if not prompt_blueprint_model:
-            raise ValueError(
-                f"Prompt '{prompt_name}' does not have a model parameters associated with it."
-            )
+            raise ValueError(f"Prompt '{prompt_name}' does not have model parameters associated with it.")
 
         return prompt_blueprint_model
 
@@ -239,25 +389,30 @@ class PromptLayerMixin:
         group_id,
         pl_run_span_id: Union[str, None] = None,
         metadata: Union[Dict[str, str], None] = None,
+        request_start_time: Union[float, None] = None,
+        request_end_time: Union[float, None] = None,
         **body,
     ):
+        # If timestamps are not provided, generate them (for backward compatibility)
+        # But note that this is the old buggy behavior
+        if request_start_time is None:
+            request_start_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
+        if request_end_time is None:
+            request_end_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
+
         return {
             "function_name": request_params["function_name"],
             "provider_type": request_params["provider"],
             "args": [],
-            "kwargs": request_params["kwargs"],
+            "kwargs": request_params["function_kwargs"],
             "tags": tags,
-            "request_start_time": datetime.datetime.now(
-                datetime.timezone.utc
-            ).timestamp(),
-            "request_end_time": datetime.datetime.now(
-                datetime.timezone.utc
-            ).timestamp(),
+            "request_start_time": request_start_time,
+            "request_end_time": request_end_time,
             "api_key": api_key,
             "metadata": metadata,
             "prompt_id": request_params["prompt_blueprint"]["id"],
             "prompt_version": request_params["prompt_blueprint"]["version"],
-            "prompt_input_variables": input_variables,
+            "prompt_input_variables": input_variables or {},
             "group_id": group_id,
             "return_prompt_blueprint": True,
             "span_id": pl_run_span_id,
@@ -275,9 +430,7 @@ class PromptLayerMixin:
                 for key, value in attributes.items():
                     span.set_attribute(key, value)
 
-                span.set_attribute(
-                    "function_input", str({"args": args, "kwargs": kwargs})
-                )
+                span.set_attribute("function_input", str({"args": args, "kwargs": kwargs}))
                 result = func(*args, **kwargs)
                 span.set_attribute("function_output", str(result))
 
@@ -294,9 +447,7 @@ class PromptLayerMixin:
                 for key, value in attributes.items():
                     span.set_attribute(key, value)
 
-                span.set_attribute(
-                    "function_input", str({"args": args, "kwargs": kwargs})
-                )
+                span.set_attribute("function_input", str({"args": args, "kwargs": kwargs}))
                 result = await func(*args, **kwargs)
                 span.set_attribute("function_output", str(result))
 