promptlayer 1.0.16__py3-none-any.whl → 1.0.78__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,6 +2,7 @@ import datetime
 import inspect
 import re
 
+from promptlayer import exceptions as _exceptions
 from promptlayer.utils import async_wrapper, promptlayer_api_handler
 
 
@@ -13,47 +14,41 @@ class PromptLayerBase(object):
         "_provider_type",
         "_api_key",
         "_tracer",
+        "_base_url",
     ]
 
-    def __init__(
-        self, obj, function_name="", provider_type="openai", api_key=None, tracer=None
-    ):
+    def __init__(self, api_key: str, base_url: str, obj, function_name="", provider_type="openai", tracer=None):
         object.__setattr__(self, "_obj", obj)
         object.__setattr__(self, "_function_name", function_name)
         object.__setattr__(self, "_provider_type", provider_type)
         object.__setattr__(self, "_api_key", api_key)
         object.__setattr__(self, "_tracer", tracer)
+        object.__setattr__(self, "_base_url", base_url)
 
     def __getattr__(self, name):
         attr = getattr(object.__getattribute__(self, "_obj"), name)
 
         if (
             name != "count_tokens"  # fix for anthropic count_tokens
-            and not re.match(
-                r"<class 'anthropic\..*Error'>", str(attr)
-            )  # fix for anthropic errors
-            and not re.match(
-                r"<class 'openai\..*Error'>", str(attr)
-            )  # fix for openai errors
+            and not re.match(r"<class 'anthropic\..*Error'>", str(attr))  # fix for anthropic errors
+            and not re.match(r"<class 'openai\..*Error'>", str(attr))  # fix for openai errors
             and (
                 inspect.isclass(attr)
                 or inspect.isfunction(attr)
                 or inspect.ismethod(attr)
-                or str(type(attr))
-                == "<class 'anthropic.resources.completions.Completions'>"
-                or str(type(attr))
-                == "<class 'anthropic.resources.completions.AsyncCompletions'>"
-                or str(type(attr)) == "<class 'anthropic.resources.messages.Messages'>"
-                or str(type(attr))
-                == "<class 'anthropic.resources.messages.AsyncMessages'>"
+                or str(type(attr)) == "<class 'anthropic.resources.completions.Completions'>"
+                or str(type(attr)) == "<class 'anthropic.resources.completions.AsyncCompletions'>"
+                or str(type(attr)) == "<class 'anthropic.resources.messages.messages.Messages'>"
+                or str(type(attr)) == "<class 'anthropic.resources.messages.messages.AsyncMessages'>"
                 or re.match(r"<class 'openai\.resources.*'>", str(type(attr)))
             )
         ):
             return PromptLayerBase(
+                object.__getattribute__(self, "_api_key"),
+                object.__getattribute__(self, "_base_url"),
                 attr,
-                function_name=f'{object.__getattribute__(self, "_function_name")}.{name}',
+                function_name=f"{object.__getattribute__(self, '_function_name')}.{name}",
                 provider_type=object.__getattribute__(self, "_provider_type"),
-                api_key=object.__getattribute__(self, "_api_key"),
                 tracer=object.__getattribute__(self, "_tracer"),
             )
         return attr
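
Note: the constructor change above moves api_key into the first positional slot and adds base_url as the second. A minimal usage sketch under the new signature (the import path, key, and URL values are illustrative, not taken from this diff):

    import openai

    from promptlayer.promptlayer_base import PromptLayerBase  # module path assumed

    # api_key and base_url are now the first two positional arguments.
    wrapped_openai = PromptLayerBase(
        "pl_...",                       # PromptLayer API key (illustrative)
        "https://api.promptlayer.com",  # PromptLayer base URL
        openai,                         # the object being proxied
        function_name="openai",
        provider_type="openai",
    )
    # Attribute access returns nested proxies, so a call like
    # wrapped_openai.chat.completions.create(...) is intercepted and logged.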
@@ -67,7 +62,7 @@ class PromptLayerBase(object):
     def __call__(self, *args, **kwargs):
         tags = kwargs.pop("pl_tags", None)
         if tags is not None and not isinstance(tags, list):
-            raise Exception("pl_tags must be a list of strings.")
+            raise _exceptions.PromptLayerValidationError("pl_tags must be a list of strings.", response=None, body=None)
 
         return_pl_id = kwargs.pop("return_pl_id", False)
         request_start_time = datetime.datetime.now().timestamp()
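
Note: with the new typed exception, a malformed pl_tags value can be caught specifically instead of via a bare except Exception. A sketch, continuing the wrapped client from the previous example:

    from promptlayer import exceptions as _exceptions

    try:
        wrapped_openai.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "hi"}],
            pl_tags="prod",  # invalid: must be a list such as ["prod"]
        )
    except _exceptions.PromptLayerValidationError as err:
        print(err)  # pl_tags must be a list of strings.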
@@ -77,23 +72,18 @@ class PromptLayerBase(object):
 
         if tracer:
             with tracer.start_as_current_span(function_name) as llm_request_span:
-                llm_request_span_id = hex(llm_request_span.context.span_id)[2:].zfill(
-                    16
-                )
-                llm_request_span.set_attribute(
-                    "provider", object.__getattribute__(self, "_provider_type")
-                )
+                llm_request_span_id = hex(llm_request_span.context.span_id)[2:].zfill(16)
+                llm_request_span.set_attribute("provider", object.__getattribute__(self, "_provider_type"))
                 llm_request_span.set_attribute("function_name", function_name)
-                llm_request_span.set_attribute(
-                    "function_input", str({"args": args, "kwargs": kwargs})
-                )
+                llm_request_span.set_attribute("function_input", str({"args": args, "kwargs": kwargs}))
 
                 if inspect.isclass(function_object):
                     result = PromptLayerBase(
+                        object.__getattribute__(self, "_api_key"),
+                        object.__getattribute__(self, "_base_url"),
                         function_object(*args, **kwargs),
                         function_name=function_name,
                         provider_type=object.__getattribute__(self, "_provider_type"),
-                        api_key=object.__getattribute__(self, "_api_key"),
                         tracer=tracer,
                     )
                     llm_request_span.set_attribute("function_output", str(result))
@@ -101,17 +91,16 @@ class PromptLayerBase(object):
 
                 function_response = function_object(*args, **kwargs)
 
-                if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(
-                    function_response
-                ):
+                if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(function_response):
                     return async_wrapper(
+                        object.__getattribute__(self, "_api_key"),
+                        object.__getattribute__(self, "_base_url"),
                         function_response,
                         return_pl_id,
                         request_start_time,
                         function_name,
                         object.__getattribute__(self, "_provider_type"),
                         tags,
-                        api_key=object.__getattribute__(self, "_api_key"),
                         llm_request_span_id=llm_request_span_id,
                         tracer=tracer,  # Pass the tracer to async_wrapper
                         *args,
@@ -120,6 +109,8 @@ class PromptLayerBase(object):
 
                 request_end_time = datetime.datetime.now().timestamp()
                 result = promptlayer_api_handler(
+                    object.__getattribute__(self, "_api_key"),
+                    object.__getattribute__(self, "_base_url"),
                     function_name,
                     object.__getattribute__(self, "_provider_type"),
                     args,
@@ -128,7 +119,6 @@ class PromptLayerBase(object):
                     function_response,
                     request_start_time,
                     request_end_time,
-                    object.__getattribute__(self, "_api_key"),
                     return_pl_id=return_pl_id,
                     llm_request_span_id=llm_request_span_id,
                 )
@@ -138,31 +128,33 @@ class PromptLayerBase(object):
         # Without tracing
         if inspect.isclass(function_object):
             return PromptLayerBase(
+                object.__getattribute__(self, "_api_key"),
+                object.__getattribute__(self, "_base_url"),
                 function_object(*args, **kwargs),
                 function_name=function_name,
                 provider_type=object.__getattribute__(self, "_provider_type"),
-                api_key=object.__getattribute__(self, "_api_key"),
             )
 
         function_response = function_object(*args, **kwargs)
 
-        if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(
-            function_response
-        ):
+        if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(function_response):
             return async_wrapper(
+                object.__getattribute__(self, "_api_key"),
+                object.__getattribute__(self, "_base_url"),
                 function_response,
                 return_pl_id,
                 request_start_time,
                 function_name,
                 object.__getattribute__(self, "_provider_type"),
                 tags,
-                api_key=object.__getattribute__(self, "_api_key"),
                 *args,
                 **kwargs,
            )
 
         request_end_time = datetime.datetime.now().timestamp()
         return promptlayer_api_handler(
+            object.__getattribute__(self, "_api_key"),
+            object.__getattribute__(self, "_base_url"),
             function_name,
             object.__getattribute__(self, "_provider_type"),
             args,
@@ -171,6 +163,5 @@ class PromptLayerBase(object):
             function_response,
             request_start_time,
             request_end_time,
-            object.__getattribute__(self, "_api_key"),
             return_pl_id=return_pl_id,
         )
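
Note: taken together, the hunks above change the call shape of promptlayer_api_handler (and async_wrapper): api_key moves from a trailing positional argument to the first slot, followed by the new base_url. Sketched from the call sites in this diff:

    # 1.0.16: promptlayer_api_handler(function_name, provider_type, args, kwargs,
    #                                 tags, response, start_time, end_time,
    #                                 api_key, return_pl_id=...)
    # 1.0.78: promptlayer_api_handler(api_key, base_url, function_name,
    #                                 provider_type, args, kwargs, tags, response,
    #                                 start_time, end_time, return_pl_id=...,
    #                                 llm_request_span_id=...)  # traced path only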
@@ -0,0 +1,460 @@
+import asyncio
+import datetime
+from copy import deepcopy
+from functools import wraps
+from typing import Any, Dict, Union
+
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.semconv.resource import ResourceAttributes
+
+from promptlayer.span_exporter import PromptLayerSpanExporter
+from promptlayer.streaming import (
+    aanthropic_stream_completion,
+    aanthropic_stream_message,
+    abedrock_stream_message,
+    agoogle_stream_chat,
+    agoogle_stream_completion,
+    amistral_stream_chat,
+    anthropic_stream_completion,
+    anthropic_stream_message,
+    aopenai_responses_stream_chat,
+    aopenai_stream_chat,
+    aopenai_stream_completion,
+    bedrock_stream_message,
+    google_stream_chat,
+    google_stream_completion,
+    mistral_stream_chat,
+    openai_responses_stream_chat,
+    openai_stream_chat,
+    openai_stream_completion,
+)
+from promptlayer.utils import (
+    aamazon_bedrock_request,
+    aanthropic_bedrock_request,
+    aanthropic_request,
+    aazure_openai_request,
+    agoogle_request,
+    amazon_bedrock_request,
+    amistral_request,
+    anthropic_bedrock_request,
+    anthropic_request,
+    aopenai_request,
+    avertexai_request,
+    azure_openai_request,
+    google_request,
+    mistral_request,
+    openai_request,
+    vertexai_request,
+)
+
+MAP_PROVIDER_TO_FUNCTION_NAME = {
+    "openai:chat-completions": {
+        "chat": {
+            "function_name": "openai.chat.completions.create",
+            "stream_function": openai_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.completions.create",
+            "stream_function": openai_stream_completion,
+        },
+    },
+    "openai:responses": {
+        "chat": {
+            "function_name": "openai.responses.create",
+            "stream_function": openai_responses_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.responses.create",
+            "stream_function": openai_responses_stream_chat,
+        },
+    },
+    "anthropic": {
+        "chat": {
+            "function_name": "anthropic.messages.create",
+            "stream_function": anthropic_stream_message,
+        },
+        "completion": {
+            "function_name": "anthropic.completions.create",
+            "stream_function": anthropic_stream_completion,
+        },
+    },
+    "openai.azure:chat-completions": {
+        "chat": {
+            "function_name": "openai.AzureOpenAI.chat.completions.create",
+            "stream_function": openai_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.AzureOpenAI.completions.create",
+            "stream_function": openai_stream_completion,
+        },
+    },
+    "openai.azure:responses": {
+        "chat": {
+            "function_name": "openai.AzureOpenAI.responses.create",
+            "stream_function": openai_responses_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.AzureOpenAI.responses.create",
+            "stream_function": openai_responses_stream_chat,
+        },
+    },
+    "mistral": {
+        "chat": {
+            "function_name": "mistral.client.chat",
+            "stream_function": mistral_stream_chat,
+        },
+        "completion": {
+            "function_name": None,
+            "stream_function": None,
+        },
+    },
+    "google": {
+        "chat": {
+            "function_name": "google.convo.send_message",
+            "stream_function": google_stream_chat,
+        },
+        "completion": {
+            "function_name": "google.model.generate_content",
+            "stream_function": google_stream_completion,
+        },
+    },
+    "amazon.bedrock": {
+        "chat": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": bedrock_stream_message,
+        },
+        "completion": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": bedrock_stream_message,
+        },
+    },
+    "anthropic.bedrock": {
+        "chat": {
+            "function_name": "anthropic.messages.create",
+            "stream_function": anthropic_stream_message,
+        },
+        "completion": {
+            "function_name": "anthropic.completions.create",
+            "stream_function": anthropic_stream_completion,
+        },
+    },
+}
+
+
+MAP_PROVIDER_TO_FUNCTION = {
+    "anthropic": anthropic_request,
+    "google": google_request,
+    "mistral": mistral_request,
+    "openai": openai_request,
+    "openai.azure": azure_openai_request,
+    "vertexai": vertexai_request,
+    "amazon.bedrock": amazon_bedrock_request,
+    "anthropic.bedrock": anthropic_bedrock_request,
+}
+
+AMAP_PROVIDER_TO_FUNCTION_NAME = {
+    "openai:chat-completions": {
+        "chat": {
+            "function_name": "openai.chat.completions.create",
+            "stream_function": aopenai_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.completions.create",
+            "stream_function": aopenai_stream_completion,
+        },
+    },
+    "openai:responses": {
+        "chat": {
+            "function_name": "openai.responses.create",
+            "stream_function": aopenai_responses_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.responses.create",
+            "stream_function": aopenai_responses_stream_chat,
+        },
+    },
+    "anthropic": {
+        "chat": {
+            "function_name": "anthropic.messages.create",
+            "stream_function": aanthropic_stream_message,
+        },
+        "completion": {
+            "function_name": "anthropic.completions.create",
+            "stream_function": aanthropic_stream_completion,
+        },
+    },
+    "openai.azure:chat-completions": {
+        "chat": {
+            "function_name": "openai.AzureOpenAI.chat.completions.create",
+            "stream_function": aopenai_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.AzureOpenAI.completions.create",
+            "stream_function": aopenai_stream_completion,
+        },
+    },
+    "openai.azure:responses": {
+        "chat": {
+            "function_name": "openai.AzureOpenAI.responses.create",
+            "stream_function": aopenai_responses_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.AzureOpenAI.responses.create",
+            "stream_function": aopenai_responses_stream_chat,
+        },
+    },
+    "mistral": {
+        "chat": {
+            "function_name": "mistral.client.chat",
+            "stream_function": amistral_stream_chat,
+        },
+        "completion": {
+            "function_name": None,
+            "stream_function": None,
+        },
+    },
+    "google": {
+        "chat": {
+            "function_name": "google.convo.send_message",
+            "stream_function": agoogle_stream_chat,
+        },
+        "completion": {
+            "function_name": "google.model.generate_content",
+            "stream_function": agoogle_stream_completion,
+        },
+    },
+    "amazon.bedrock": {
+        "chat": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": abedrock_stream_message,
+        },
+        "completion": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": abedrock_stream_message,
+        },
+    },
+    "anthropic.bedrock": {
+        "chat": {
+            "function_name": "anthropic.messages.create",
+            "stream_function": aanthropic_stream_message,
+        },
+        "completion": {
+            "function_name": "anthropic.completions.create",
+            "stream_function": aanthropic_stream_completion,
+        },
+    },
+}
+
+
+AMAP_PROVIDER_TO_FUNCTION = {
+    "anthropic": aanthropic_request,
+    "google": agoogle_request,
+    "mistral": amistral_request,
+    "openai": aopenai_request,
+    "openai.azure": aazure_openai_request,
+    "vertexai": avertexai_request,
+    "amazon.bedrock": aamazon_bedrock_request,
+    "anthropic.bedrock": aanthropic_bedrock_request,
+}
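
Note: these lookup tables drive dispatch in _prepare_llm_data below. A provider key plus the template type ("chat" or "completion") selects the function name recorded with the request and the stream handler that parses chunks. For instance:

    config = MAP_PROVIDER_TO_FUNCTION_NAME["anthropic"]["chat"]
    config["function_name"]                # "anthropic.messages.create"
    config["stream_function"]              # anthropic_stream_message
    MAP_PROVIDER_TO_FUNCTION["anthropic"]  # anthropic_request (sync)
    AMAP_PROVIDER_TO_FUNCTION["anthropic"] # aanthropic_request (async)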
+
+
+class PromptLayerMixin:
+    @staticmethod
+    def _initialize_tracer(api_key: str, base_url: str, throw_on_error: bool, enable_tracing: bool = False):
+        if enable_tracing:
+            resource = Resource(attributes={ResourceAttributes.SERVICE_NAME: "prompt-layer-library"})
+            tracer_provider = TracerProvider(resource=resource)
+            promptlayer_exporter = PromptLayerSpanExporter(
+                api_key=api_key, base_url=base_url, throw_on_error=throw_on_error
+            )
+            span_processor = BatchSpanProcessor(promptlayer_exporter)
+            tracer_provider.add_span_processor(span_processor)
+            tracer = tracer_provider.get_tracer(__name__)
+            return tracer_provider, tracer
+        else:
+            return None, None
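
Note: a usage sketch of the tracer bootstrap (key and flag values illustrative). With enable_tracing=False the helper returns (None, None) and all downstream span logic becomes a no-op:

    tracer_provider, tracer = PromptLayerMixin._initialize_tracer(
        api_key="pl_...",
        base_url="https://api.promptlayer.com",
        throw_on_error=False,
        enable_tracing=True,
    )
    if tracer is not None:
        with tracer.start_as_current_span("example-span"):
            pass  # finished spans are batched to PromptLayerSpanExporter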
+
+    @staticmethod
+    def _prepare_get_prompt_template_params(
+        *,
+        prompt_version: Union[int, None],
+        prompt_release_label: Union[str, None],
+        input_variables: Union[Dict[str, Any], None],
+        metadata: Union[Dict[str, str], None],
+        provider: Union[str, None] = None,
+        model: Union[str, None] = None,
+        model_parameter_overrides: Union[Dict[str, Any], None] = None,
+    ) -> Dict[str, Any]:
+        params = {}
+
+        if prompt_version:
+            params["version"] = prompt_version
+        if prompt_release_label:
+            params["label"] = prompt_release_label
+        if input_variables:
+            params["input_variables"] = input_variables
+        if metadata:
+            params["metadata_filters"] = metadata
+        if provider:
+            params["provider"] = provider
+        if model:
+            params["model"] = model
+        if model_parameter_overrides:
+            params["model_parameter_overrides"] = model_parameter_overrides
+
+        return params
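
Note: only truthy values make it into the request parameters, so callers can pass None freely (though falsy-but-meaningful values such as 0 are dropped too). For example:

    params = PromptLayerMixin._prepare_get_prompt_template_params(
        prompt_version=None,             # dropped
        prompt_release_label="prod",
        input_variables={"name": "Ada"},
        metadata=None,                   # dropped
    )
    # params == {"label": "prod", "input_variables": {"name": "Ada"}}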
+
+    @staticmethod
+    def _prepare_llm_data(
+        *,
+        prompt_blueprint,
+        prompt_template,
+        prompt_blueprint_model,
+        stream,
+        is_async=False,
+    ):
+        client_kwargs = {}
+        function_kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
+        function_kwargs["stream"] = stream
+        provider = prompt_blueprint_model["provider"]
+        api_type = prompt_blueprint_model.get("api_type", "chat-completions")
+
+        if custom_provider := prompt_blueprint.get("custom_provider"):
+            provider = custom_provider["client"]
+            client_kwargs = {
+                "api_key": custom_provider["api_key"],
+                "base_url": custom_provider["base_url"],
+            }
+        elif provider_base_url := prompt_blueprint.get("provider_base_url"):
+            client_kwargs["base_url"] = provider_base_url["url"]
+
+        if stream and provider in ["openai", "openai.azure"] and api_type == "chat-completions":
+            function_kwargs["stream_options"] = {"include_usage": True}
+
+        provider_function_name = provider
+        if provider_function_name == "vertexai":
+            if "gemini" in prompt_blueprint_model["name"]:
+                provider_function_name = "google"
+            elif "claude" in prompt_blueprint_model["name"]:
+                provider_function_name = "anthropic"
+
+        if provider_function_name in ("openai", "openai.azure"):
+            api = api_type if api_type is not None else "chat-completions"
+            provider_function_name = f"{provider_function_name}:{api}"
+
+        if is_async:
+            config = AMAP_PROVIDER_TO_FUNCTION_NAME[provider_function_name][prompt_template["type"]]
+            request_function = AMAP_PROVIDER_TO_FUNCTION[provider]
+        else:
+            config = MAP_PROVIDER_TO_FUNCTION_NAME[provider_function_name][prompt_template["type"]]
+            request_function = MAP_PROVIDER_TO_FUNCTION[provider]
+
+        return {
+            "provider": provider,
+            "function_name": config["function_name"],
+            "stream_function": config["stream_function"],
+            "request_function": request_function,
+            "client_kwargs": client_kwargs,
+            "function_kwargs": function_kwargs,
+            "prompt_blueprint": prompt_blueprint,
+        }
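
Note: a hedged sketch of the minimal blueprint shape this helper consumes (field names taken from the code above, values illustrative):

    prompt_blueprint = {
        "llm_kwargs": {"model": "gpt-4o-mini", "messages": []},
        "metadata": {"model": {"provider": "openai", "name": "gpt-4o-mini"}},
    }
    llm_data = PromptLayerMixin._prepare_llm_data(
        prompt_blueprint=prompt_blueprint,
        prompt_template={"type": "chat"},
        prompt_blueprint_model=prompt_blueprint["metadata"]["model"],
        stream=True,
    )
    # llm_data["function_name"] == "openai.chat.completions.create"
    # and, since this is a streaming OpenAI chat call,
    # llm_data["function_kwargs"]["stream_options"] == {"include_usage": True}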
+
+    @staticmethod
+    def _validate_and_extract_model_from_prompt_blueprint(*, prompt_blueprint, prompt_name):
+        if not prompt_blueprint["llm_kwargs"]:
+            raise ValueError(
+                f"Prompt '{prompt_name}' does not have any LLM kwargs associated with it. Please set your model parameters in the registry in the PromptLayer dashboard."
+            )
+
+        prompt_blueprint_metadata = prompt_blueprint.get("metadata")
+
+        if not prompt_blueprint_metadata:
+            raise ValueError(f"Prompt '{prompt_name}' does not have any metadata associated with it.")
+
+        prompt_blueprint_model = prompt_blueprint_metadata.get("model")
+
+        if not prompt_blueprint_model:
+            raise ValueError(f"Prompt '{prompt_name}' does not have model parameters associated with it.")
+
+        return prompt_blueprint_model
+
+    @staticmethod
+    def _prepare_track_request_kwargs(
+        api_key,
+        request_params,
+        tags,
+        input_variables,
+        group_id,
+        pl_run_span_id: Union[str, None] = None,
+        metadata: Union[Dict[str, str], None] = None,
+        request_start_time: Union[float, None] = None,
+        request_end_time: Union[float, None] = None,
+        **body,
+    ):
+        # If timestamps are not provided, generate them (for backward compatibility)
+        # But note that this is the old buggy behavior
+        if request_start_time is None:
+            request_start_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
+        if request_end_time is None:
+            request_end_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
+
+        return {
+            "function_name": request_params["function_name"],
+            "provider_type": request_params["provider"],
+            "args": [],
+            "kwargs": request_params["function_kwargs"],
+            "tags": tags,
+            "request_start_time": request_start_time,
+            "request_end_time": request_end_time,
+            "api_key": api_key,
+            "metadata": metadata,
+            "prompt_id": request_params["prompt_blueprint"]["id"],
+            "prompt_version": request_params["prompt_blueprint"]["version"],
+            "prompt_input_variables": input_variables or {},
+            "group_id": group_id,
+            "return_prompt_blueprint": True,
+            "span_id": pl_run_span_id,
+            **body,
+        }
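
Note: as the inline comment warns, the UTC-now fallback only preserves backward compatibility; callers should pass the real request timestamps so tracked latency is accurate. A sketch (api key and ids illustrative):

    import datetime

    start_ts = datetime.datetime.now(datetime.timezone.utc).timestamp()
    # ... provider call happens here ...
    end_ts = datetime.datetime.now(datetime.timezone.utc).timestamp()

    track_kwargs = PromptLayerMixin._prepare_track_request_kwargs(
        "pl_...",
        {
            "function_name": "openai.chat.completions.create",
            "provider": "openai",
            "function_kwargs": {"model": "gpt-4o-mini"},
            "prompt_blueprint": {"id": 123, "version": 4},
        },
        tags=["prod"],
        input_variables={"name": "Ada"},
        group_id=None,
        request_start_time=start_ts,
        request_end_time=end_ts,
    )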
+
+    def traceable(self, attributes=None, name=None):
+        def decorator(func):
+            @wraps(func)
+            def sync_wrapper(*args, **kwargs):
+                if self.tracer:
+                    span_name = name or func.__name__
+                    with self.tracer.start_as_current_span(span_name) as span:
+                        if attributes:
+                            for key, value in attributes.items():
+                                span.set_attribute(key, value)
+
+                        span.set_attribute("function_input", str({"args": args, "kwargs": kwargs}))
+                        result = func(*args, **kwargs)
+                        span.set_attribute("function_output", str(result))
+
+                        return result
+                else:
+                    return func(*args, **kwargs)
+
+            @wraps(func)
+            async def async_wrapper(*args, **kwargs):
+                if self.tracer:
+                    span_name = name or func.__name__
+                    with self.tracer.start_as_current_span(span_name) as span:
+                        if attributes:
+                            for key, value in attributes.items():
+                                span.set_attribute(key, value)
+
+                        span.set_attribute("function_input", str({"args": args, "kwargs": kwargs}))
+                        result = await func(*args, **kwargs)
+                        span.set_attribute("function_output", str(result))
+
+                        return result
+                else:
+                    return await func(*args, **kwargs)
+
+            return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
+
+        return decorator
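
Note: traceable reads self.tracer, so it is used through a client instance. A hedged usage sketch, assuming the public PromptLayer client exposes this mixin and an enable_tracing flag (neither is shown in this diff):

    from promptlayer import PromptLayer  # client assumed to include this mixin

    pl = PromptLayer(api_key="pl_...", enable_tracing=True)

    @pl.traceable(attributes={"team": "search"}, name="rank_results")
    def rank_results(query, docs):
        return sorted(docs, key=len)

    @pl.traceable()  # async functions get the async wrapper automatically
    async def fetch_docs(query):
        return []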