promptlayer 1.0.29__py3-none-any.whl → 1.0.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of promptlayer might be problematic.

promptlayer/__init__.py CHANGED
@@ -1,4 +1,4 @@
  from .promptlayer import AsyncPromptLayer, PromptLayer
 
- __version__ = "1.0.29"
+ __version__ = "1.0.31"
  __all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]
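
As a quick sanity check after upgrading (a minimal sketch, assuming the 1.0.31 wheel has been installed into the current environment):

```python
# Minimal post-upgrade check; assumes `pip install promptlayer==1.0.31` was run.
import promptlayer

print(promptlayer.__version__)  # expected: "1.0.31"
print(promptlayer.__all__)      # ["PromptLayer", "AsyncPromptLayer", "__version__"]
```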
promptlayer/promptlayer.py CHANGED
@@ -1,78 +1,27 @@
  import asyncio
- import datetime
  import os
- from copy import deepcopy
- from functools import wraps
  from typing import Any, Dict, List, Literal, Optional, Union
 
  import nest_asyncio
- from opentelemetry.sdk.resources import Resource
- from opentelemetry.sdk.trace import TracerProvider
- from opentelemetry.sdk.trace.export import BatchSpanProcessor
- from opentelemetry.semconv.resource import ResourceAttributes
 
  from promptlayer.groups import AsyncGroupManager, GroupManager
  from promptlayer.promptlayer_base import PromptLayerBase
- from promptlayer.span_exporter import PromptLayerSpanExporter
+ from promptlayer.promptlayer_mixins import PromptLayerMixin
  from promptlayer.templates import AsyncTemplateManager, TemplateManager
  from promptlayer.track import AsyncTrackManager, TrackManager
  from promptlayer.types.prompt_template import PromptTemplate
  from promptlayer.utils import (
- anthropic_request,
- anthropic_stream_completion,
- anthropic_stream_message,
  arun_workflow_request,
+ astream_response,
+ atrack_request,
  autil_log_request,
- azure_openai_request,
- openai_request,
- openai_stream_chat,
- openai_stream_completion,
  stream_response,
  track_request,
  util_log_request,
  )
 
- MAP_PROVIDER_TO_FUNCTION_NAME = {
- "openai": {
- "chat": {
- "function_name": "openai.chat.completions.create",
- "stream_function": openai_stream_chat,
- },
- "completion": {
- "function_name": "openai.completions.create",
- "stream_function": openai_stream_completion,
- },
- },
- "anthropic": {
- "chat": {
- "function_name": "anthropic.messages.create",
- "stream_function": anthropic_stream_message,
- },
- "completion": {
- "function_name": "anthropic.completions.create",
- "stream_function": anthropic_stream_completion,
- },
- },
- "openai.azure": {
- "chat": {
- "function_name": "openai.AzureOpenAI.chat.completions.create",
- "stream_function": openai_stream_chat,
- },
- "completion": {
- "function_name": "openai.AzureOpenAI.completions.create",
- "stream_function": openai_stream_completion,
- },
- },
- }
-
- MAP_PROVIDER_TO_FUNCTION = {
- "openai": openai_request,
- "anthropic": anthropic_request,
- "openai.azure": azure_openai_request,
- }
-
-
- class PromptLayer:
+
+ class PromptLayer(PromptLayerMixin):
  def __init__(
  self,
  api_key: str = None,
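
The import changes above are the visible half of a refactor: the provider dispatch tables, tracing setup, and request-tracking helpers move out of promptlayer.py into the new promptlayer_mixins module (shown later in this diff), and both clients inherit them. A minimal sketch of the resulting relationship:

```python
# Sketch of the 1.0.31 module layout: shared helpers live on PromptLayerMixin,
# and both clients inherit them instead of defining their own copies.
from promptlayer import AsyncPromptLayer, PromptLayer
from promptlayer.promptlayer_mixins import PromptLayerMixin

assert issubclass(PromptLayer, PromptLayerMixin)
assert issubclass(AsyncPromptLayer, PromptLayerMixin)
```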
@@ -134,101 +83,18 @@ class PromptLayer:
134
83
  ):
135
84
  def _track_request(**body):
136
85
  track_request_kwargs = self._prepare_track_request_kwargs(
137
- request_params, tags, input_variables, group_id, pl_run_span_id, **body
86
+ self.api_key,
87
+ request_params,
88
+ tags,
89
+ input_variables,
90
+ group_id,
91
+ pl_run_span_id,
92
+ **body,
138
93
  )
139
94
  return track_request(**track_request_kwargs)
140
95
 
141
96
  return _track_request
142
97
 
143
- @staticmethod
144
- def _initialize_tracer(api_key: str = None, enable_tracing: bool = False):
145
- if enable_tracing:
146
- resource = Resource(
147
- attributes={ResourceAttributes.SERVICE_NAME: "prompt-layer-library"}
148
- )
149
- tracer_provider = TracerProvider(resource=resource)
150
- promptlayer_exporter = PromptLayerSpanExporter(api_key=api_key)
151
- span_processor = BatchSpanProcessor(promptlayer_exporter)
152
- tracer_provider.add_span_processor(span_processor)
153
- tracer = tracer_provider.get_tracer(__name__)
154
- return tracer_provider, tracer
155
- else:
156
- return None, None
157
-
158
- @staticmethod
159
- def _prepare_get_prompt_template_params(
160
- *, prompt_version, prompt_release_label, input_variables, metadata
161
- ):
162
- params = {}
163
-
164
- if prompt_version:
165
- params["version"] = prompt_version
166
- if prompt_release_label:
167
- params["label"] = prompt_release_label
168
- if input_variables:
169
- params["input_variables"] = input_variables
170
- if metadata:
171
- params["metadata_filters"] = metadata
172
-
173
- return params
174
-
175
- @staticmethod
176
- def _prepare_llm_request_params(
177
- *, prompt_blueprint, prompt_template, prompt_blueprint_model, stream
178
- ):
179
- provider = prompt_blueprint_model["provider"]
180
- kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
181
- config = MAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
182
-
183
- if provider_base_url := prompt_blueprint.get("provider_base_url"):
184
- kwargs["base_url"] = provider_base_url["url"]
185
-
186
- kwargs["stream"] = stream
187
- if stream and provider in ["openai", "openai.azure"]:
188
- kwargs["stream_options"] = {"include_usage": True}
189
-
190
- return {
191
- "provider": provider,
192
- "function_name": config["function_name"],
193
- "stream_function": config["stream_function"],
194
- "request_function": MAP_PROVIDER_TO_FUNCTION[provider],
195
- "kwargs": kwargs,
196
- "prompt_blueprint": prompt_blueprint,
197
- }
198
-
199
- def _prepare_track_request_kwargs(
200
- self,
201
- request_params,
202
- tags,
203
- input_variables,
204
- group_id,
205
- pl_run_span_id: Union[str, None] = None,
206
- metadata: Union[Dict[str, str], None] = None,
207
- **body,
208
- ):
209
- return {
210
- "function_name": request_params["function_name"],
211
- "provider_type": request_params["provider"],
212
- "args": [],
213
- "kwargs": request_params["kwargs"],
214
- "tags": tags,
215
- "request_start_time": datetime.datetime.now(
216
- datetime.timezone.utc
217
- ).timestamp(),
218
- "request_end_time": datetime.datetime.now(
219
- datetime.timezone.utc
220
- ).timestamp(),
221
- "api_key": self.api_key,
222
- "metadata": metadata,
223
- "prompt_id": request_params["prompt_blueprint"]["id"],
224
- "prompt_version": request_params["prompt_blueprint"]["version"],
225
- "prompt_input_variables": input_variables,
226
- "group_id": group_id,
227
- "return_prompt_blueprint": True,
228
- "span_id": pl_run_span_id,
229
- **body,
230
- }
231
-
232
98
  def _run_internal(
233
99
  self,
234
100
  *,
@@ -236,6 +102,7 @@ class PromptLayer:
  prompt_version: Union[int, None] = None,
  prompt_release_label: Union[str, None] = None,
  input_variables: Union[Dict[str, Any], None] = None,
+ model_parameter_overrides: Union[Dict[str, Any], None] = None,
  tags: Union[List[str], None] = None,
  metadata: Union[Dict[str, str], None] = None,
  group_id: Union[int, None] = None,
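
The new model_parameter_overrides argument flows from run into _run_internal and is merged over the prompt's stored llm_kwargs (see kwargs.update(model_parameter_overrides) in the mixin later in this diff). A hedged usage sketch; the prompt name and override keys are illustrative only:

```python
# Sketch: overriding saved model parameters for a single call.
# "my-prompt" and the override keys are placeholders, not values from this diff.
pl = PromptLayer()  # assumes PROMPTLAYER_API_KEY is set in the environment
result = pl.run(
    prompt_name="my-prompt",
    input_variables={"topic": "release notes"},
    model_parameter_overrides={"temperature": 0.2, "max_tokens": 256},
)
print(result["request_id"])
```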
@@ -256,6 +123,7 @@ class PromptLayer:
  prompt_blueprint=prompt_blueprint,
  prompt_template=prompt_blueprint["prompt_template"],
  prompt_blueprint_model=prompt_blueprint_model,
+ model_parameter_overrides=model_parameter_overrides,
  stream=stream,
  )
 
@@ -303,6 +171,7 @@ class PromptLayer:
  **body,
  ):
  track_request_kwargs = self._prepare_track_request_kwargs(
+ self.api_key,
  request_params,
  tags,
  input_variables,
@@ -313,37 +182,13 @@ class PromptLayer:
313
182
  )
314
183
  return track_request(**track_request_kwargs)
315
184
 
316
- @staticmethod
317
- def _validate_and_extract_model_from_prompt_blueprint(
318
- *, prompt_blueprint, prompt_name
319
- ):
320
- if not prompt_blueprint["llm_kwargs"]:
321
- raise ValueError(
322
- f"Prompt '{prompt_name}' does not have any LLM kwargs associated with it."
323
- )
324
-
325
- prompt_blueprint_metadata = prompt_blueprint.get("metadata")
326
-
327
- if not prompt_blueprint_metadata:
328
- raise ValueError(
329
- f"Prompt '{prompt_name}' does not have any metadata associated with it."
330
- )
331
-
332
- prompt_blueprint_model = prompt_blueprint_metadata.get("model")
333
-
334
- if not prompt_blueprint_model:
335
- raise ValueError(
336
- f"Prompt '{prompt_name}' does not have a model parameters associated with it."
337
- )
338
-
339
- return prompt_blueprint_model
340
-
341
185
  def run(
342
186
  self,
343
187
  prompt_name: str,
344
188
  prompt_version: Union[int, None] = None,
345
189
  prompt_release_label: Union[str, None] = None,
346
190
  input_variables: Union[Dict[str, Any], None] = None,
191
+ model_parameter_overrides: Union[Dict[str, Any], None] = None,
347
192
  tags: Union[List[str], None] = None,
348
193
  metadata: Union[Dict[str, str], None] = None,
349
194
  group_id: Union[int, None] = None,
@@ -354,6 +199,7 @@ class PromptLayer:
  "prompt_version": prompt_version,
  "prompt_release_label": prompt_release_label,
  "input_variables": input_variables,
+ "model_parameter_overrides": model_parameter_overrides,
  "tags": tags,
  "metadata": metadata,
  "group_id": group_id,
@@ -421,50 +267,6 @@ class PromptLayer:
421
267
  except Exception as e:
422
268
  raise Exception(f"Error running workflow: {str(e)}")
423
269
 
424
- def traceable(self, attributes=None, name=None):
425
- def decorator(func):
426
- @wraps(func)
427
- def sync_wrapper(*args, **kwargs):
428
- if self.tracer:
429
- span_name = name or func.__name__
430
- with self.tracer.start_as_current_span(span_name) as span:
431
- if attributes:
432
- for key, value in attributes.items():
433
- span.set_attribute(key, value)
434
-
435
- span.set_attribute(
436
- "function_input", str({"args": args, "kwargs": kwargs})
437
- )
438
- result = func(*args, **kwargs)
439
- span.set_attribute("function_output", str(result))
440
-
441
- return result
442
- else:
443
- return func(*args, **kwargs)
444
-
445
- @wraps(func)
446
- async def async_wrapper(*args, **kwargs):
447
- if self.tracer:
448
- span_name = name or func.__name__
449
- with self.tracer.start_as_current_span(span_name) as span:
450
- if attributes:
451
- for key, value in attributes.items():
452
- span.set_attribute(key, value)
453
-
454
- span.set_attribute(
455
- "function_input", str({"args": args, "kwargs": kwargs})
456
- )
457
- result = await func(*args, **kwargs)
458
- span.set_attribute("function_output", str(result))
459
-
460
- return result
461
- else:
462
- return await func(*args, **kwargs)
463
-
464
- return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
465
-
466
- return decorator
467
-
468
270
  def log_request(
469
271
  self,
470
272
  *,
@@ -508,10 +310,11 @@ class PromptLayer:
  )
 
 
- class AsyncPromptLayer:
+ class AsyncPromptLayer(PromptLayerMixin):
  def __init__(
  self,
  api_key: str = None,
+ enable_tracing: bool = False,
  ):
  if api_key is None:
  api_key = os.environ.get("PROMPTLAYER_API_KEY")
@@ -525,8 +328,36 @@ class AsyncPromptLayer:
  self.api_key = api_key
  self.templates = AsyncTemplateManager(api_key)
  self.group = AsyncGroupManager(api_key)
+ self.tracer_provider, self.tracer = self._initialize_tracer(
+ api_key, enable_tracing
+ )
  self.track = AsyncTrackManager(api_key)
 
+ def __getattr__(
+ self, name: Union[Literal["openai"], Literal["anthropic"], Literal["prompts"]]
+ ):
+ if name == "openai":
+ import openai as openai_module
+
+ openai = PromptLayerBase(
+ openai_module,
+ function_name="openai",
+ api_key=self.api_key,
+ )
+ return openai
+ elif name == "anthropic":
+ import anthropic as anthropic_module
+
+ anthropic = PromptLayerBase(
+ anthropic_module,
+ function_name="anthropic",
+ provider_type="anthropic",
+ api_key=self.api_key,
+ )
+ return anthropic
+ else:
+ raise AttributeError(f"module {__name__} has no attribute {name}")
+
  async def run_workflow(
  self,
  workflow_name: str,
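
The new __getattr__ lazily wraps the openai and anthropic modules in PromptLayerBase so calls made through the async client are proxied and logged. A hedged sketch of that access pattern, assuming the openai and anthropic packages are installed and that the wrapper follows the existing PromptLayerBase proxy convention:

```python
# Sketch only: module access is resolved by the __getattr__ shown above.
async_pl = AsyncPromptLayer()         # reads PROMPTLAYER_API_KEY from the environment
openai_proxy = async_pl.openai        # PromptLayerBase wrapping the openai module
anthropic_proxy = async_pl.anthropic  # PromptLayerBase wrapping the anthropic module
```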
@@ -552,6 +383,43 @@ class AsyncPromptLayer:
  except Exception as e:
  raise Exception(f"Error running workflow: {str(e)}")
 
+ async def run(
+ self,
+ prompt_name: str,
+ prompt_version: Union[int, None] = None,
+ prompt_release_label: Union[str, None] = None,
+ input_variables: Union[Dict[str, Any], None] = None,
+ model_parameter_overrides: Union[Dict[str, Any], None] = None,
+ tags: Union[List[str], None] = None,
+ metadata: Union[Dict[str, str], None] = None,
+ group_id: Union[int, None] = None,
+ stream: bool = False,
+ ) -> Dict[str, Any]:
+ _run_internal_kwargs = {
+ "prompt_name": prompt_name,
+ "prompt_version": prompt_version,
+ "prompt_release_label": prompt_release_label,
+ "input_variables": input_variables,
+ "model_parameter_overrides": model_parameter_overrides,
+ "tags": tags,
+ "metadata": metadata,
+ "group_id": group_id,
+ "stream": stream,
+ }
+
+ if self.tracer:
+ with self.tracer.start_as_current_span("PromptLayer Run") as span:
+ span.set_attribute("prompt_name", prompt_name)
+ span.set_attribute("function_input", str(_run_internal_kwargs))
+ pl_run_span_id = hex(span.context.span_id)[2:].zfill(16)
+ result = await self._run_internal(
+ **_run_internal_kwargs, pl_run_span_id=pl_run_span_id
+ )
+ span.set_attribute("function_output", str(result))
+ return result
+ else:
+ return await self._run_internal(**_run_internal_kwargs)
+
  async def log_request(
  self,
  *,
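
AsyncPromptLayer now exposes the same run API as the synchronous client and, when tracing is enabled, wraps each call in a "PromptLayer Run" span whose id is forwarded as pl_run_span_id. A usage sketch (the prompt name and variables are illustrative):

```python
import asyncio

from promptlayer import AsyncPromptLayer


async def main() -> None:
    # enable_tracing wires up the tracer initialized via PromptLayerMixin.
    async_pl = AsyncPromptLayer(enable_tracing=True)
    result = await async_pl.run(
        prompt_name="my-prompt",  # illustrative name
        input_variables={"question": "What changed in 1.0.31?"},
        tags=["example"],
    )
    print(result["request_id"], result["prompt_blueprint"] is not None)


asyncio.run(main())
```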
@@ -593,3 +461,116 @@ class AsyncPromptLayer:
593
461
  function_name=function_name,
594
462
  score=score,
595
463
  )
464
+
465
+ async def _create_track_request_callable(
466
+ self,
467
+ *,
468
+ request_params,
469
+ tags,
470
+ input_variables,
471
+ group_id,
472
+ pl_run_span_id: Union[str, None] = None,
473
+ ):
474
+ async def _track_request(**body):
475
+ track_request_kwargs = self._prepare_track_request_kwargs(
476
+ self.api_key,
477
+ request_params,
478
+ tags,
479
+ input_variables,
480
+ group_id,
481
+ pl_run_span_id,
482
+ **body,
483
+ )
484
+ return await atrack_request(**track_request_kwargs)
485
+
486
+ return await _track_request
487
+
488
+ async def _track_request_log(
489
+ self,
490
+ request_params,
491
+ tags,
492
+ input_variables,
493
+ group_id,
494
+ pl_run_span_id: Union[str, None] = None,
495
+ metadata: Union[Dict[str, str], None] = None,
496
+ **body,
497
+ ):
498
+ track_request_kwargs = self._prepare_track_request_kwargs(
499
+ self.api_key,
500
+ request_params,
501
+ tags,
502
+ input_variables,
503
+ group_id,
504
+ pl_run_span_id,
505
+ metadata=metadata,
506
+ **body,
507
+ )
508
+ return await atrack_request(**track_request_kwargs)
509
+
510
+ async def _run_internal(
511
+ self,
512
+ *,
513
+ prompt_name: str,
514
+ prompt_version: Union[int, None] = None,
515
+ prompt_release_label: Union[str, None] = None,
516
+ input_variables: Union[Dict[str, Any], None] = None,
517
+ model_parameter_overrides: Union[Dict[str, Any], None] = None,
518
+ tags: Union[List[str], None] = None,
519
+ metadata: Union[Dict[str, str], None] = None,
520
+ group_id: Union[int, None] = None,
521
+ stream: bool = False,
522
+ pl_run_span_id: Union[str, None] = None,
523
+ ) -> Dict[str, Any]:
524
+ get_prompt_template_params = self._prepare_get_prompt_template_params(
525
+ prompt_version=prompt_version,
526
+ prompt_release_label=prompt_release_label,
527
+ input_variables=input_variables,
528
+ metadata=metadata,
529
+ )
530
+ prompt_blueprint = await self.templates.get(
531
+ prompt_name, get_prompt_template_params
532
+ )
533
+ prompt_blueprint_model = self._validate_and_extract_model_from_prompt_blueprint(
534
+ prompt_blueprint=prompt_blueprint, prompt_name=prompt_name
535
+ )
536
+ llm_request_params = self._prepare_llm_request_params(
537
+ prompt_blueprint=prompt_blueprint,
538
+ prompt_template=prompt_blueprint["prompt_template"],
539
+ prompt_blueprint_model=prompt_blueprint_model,
540
+ model_parameter_overrides=model_parameter_overrides,
541
+ stream=stream,
542
+ is_async=True,
543
+ )
544
+
545
+ response = await llm_request_params["request_function"](
546
+ llm_request_params["prompt_blueprint"], **llm_request_params["kwargs"]
547
+ )
548
+
549
+ if stream:
550
+ return astream_response(
551
+ response,
552
+ self._create_track_request_callable(
553
+ request_params=llm_request_params,
554
+ tags=tags,
555
+ input_variables=input_variables,
556
+ group_id=group_id,
557
+ pl_run_span_id=pl_run_span_id,
558
+ ),
559
+ llm_request_params["stream_function"],
560
+ )
561
+
562
+ request_log = await self._track_request_log(
563
+ llm_request_params,
564
+ tags,
565
+ input_variables,
566
+ group_id,
567
+ pl_run_span_id,
568
+ metadata=metadata,
569
+ request_response=response.model_dump(),
570
+ )
571
+
572
+ return {
573
+ "request_id": request_log.get("request_id", None),
574
+ "raw_response": response,
575
+ "prompt_blueprint": request_log.get("prompt_blueprint", None),
576
+ }
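
With stream=True, the async path returns the astream_response generator added to utils.py further down: each yielded dict carries the latest provider chunk in raw_response, and only the final item has request_id and prompt_blueprint filled in. Continuing the sketch above:

```python
async def consume_stream() -> None:
    # Sketch: async_pl is the AsyncPromptLayer instance from the previous example.
    stream = await async_pl.run(prompt_name="my-prompt", stream=True)
    last = None
    async for chunk in stream:
        last = chunk  # chunk["raw_response"] holds the newest provider chunk
    if last is not None:
        print(last["request_id"], last["prompt_blueprint"])
```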
promptlayer/promptlayer_mixins.py ADDED
@@ -0,0 +1,283 @@
1
+ import asyncio
2
+ import datetime
3
+ from copy import deepcopy
4
+ from functools import wraps
5
+ from typing import Dict, Union
6
+
7
+ from opentelemetry.sdk.resources import Resource
8
+ from opentelemetry.sdk.trace import TracerProvider
9
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
10
+ from opentelemetry.semconv.resource import ResourceAttributes
11
+
12
+ from promptlayer.span_exporter import PromptLayerSpanExporter
13
+ from promptlayer.utils import (
14
+ aanthropic_request,
15
+ aanthropic_stream_completion,
16
+ aanthropic_stream_message,
17
+ aazure_openai_request,
18
+ anthropic_request,
19
+ anthropic_stream_completion,
20
+ anthropic_stream_message,
21
+ aopenai_request,
22
+ aopenai_stream_chat,
23
+ aopenai_stream_completion,
24
+ azure_openai_request,
25
+ openai_request,
26
+ openai_stream_chat,
27
+ openai_stream_completion,
28
+ )
29
+
30
+ MAP_PROVIDER_TO_FUNCTION_NAME = {
31
+ "openai": {
32
+ "chat": {
33
+ "function_name": "openai.chat.completions.create",
34
+ "stream_function": openai_stream_chat,
35
+ },
36
+ "completion": {
37
+ "function_name": "openai.completions.create",
38
+ "stream_function": openai_stream_completion,
39
+ },
40
+ },
41
+ "anthropic": {
42
+ "chat": {
43
+ "function_name": "anthropic.messages.create",
44
+ "stream_function": anthropic_stream_message,
45
+ },
46
+ "completion": {
47
+ "function_name": "anthropic.completions.create",
48
+ "stream_function": anthropic_stream_completion,
49
+ },
50
+ },
51
+ "openai.azure": {
52
+ "chat": {
53
+ "function_name": "openai.AzureOpenAI.chat.completions.create",
54
+ "stream_function": openai_stream_chat,
55
+ },
56
+ "completion": {
57
+ "function_name": "openai.AzureOpenAI.completions.create",
58
+ "stream_function": openai_stream_completion,
59
+ },
60
+ },
61
+ }
62
+
63
+
64
+ MAP_PROVIDER_TO_FUNCTION = {
65
+ "openai": openai_request,
66
+ "anthropic": anthropic_request,
67
+ "openai.azure": azure_openai_request,
68
+ }
69
+
70
+ AMAP_PROVIDER_TO_FUNCTION_NAME = {
71
+ "openai": {
72
+ "chat": {
73
+ "function_name": "openai.chat.completions.create",
74
+ "stream_function": aopenai_stream_chat,
75
+ },
76
+ "completion": {
77
+ "function_name": "openai.completions.create",
78
+ "stream_function": aopenai_stream_completion,
79
+ },
80
+ },
81
+ "anthropic": {
82
+ "chat": {
83
+ "function_name": "anthropic.messages.create",
84
+ "stream_function": aanthropic_stream_message,
85
+ },
86
+ "completion": {
87
+ "function_name": "anthropic.completions.create",
88
+ "stream_function": aanthropic_stream_completion,
89
+ },
90
+ },
91
+ "openai.azure": {
92
+ "chat": {
93
+ "function_name": "openai.AzureOpenAI.chat.completions.create",
94
+ "stream_function": aopenai_stream_chat,
95
+ },
96
+ "completion": {
97
+ "function_name": "openai.AzureOpenAI.completions.create",
98
+ "stream_function": aopenai_stream_completion,
99
+ },
100
+ },
101
+ }
102
+
103
+
104
+ AMAP_PROVIDER_TO_FUNCTION = {
105
+ "openai": aopenai_request,
106
+ "anthropic": aanthropic_request,
107
+ "openai.azure": aazure_openai_request,
108
+ }
109
+
110
+
111
+ class PromptLayerMixin:
112
+ @staticmethod
113
+ def _initialize_tracer(api_key: str = None, enable_tracing: bool = False):
114
+ if enable_tracing:
115
+ resource = Resource(
116
+ attributes={ResourceAttributes.SERVICE_NAME: "prompt-layer-library"}
117
+ )
118
+ tracer_provider = TracerProvider(resource=resource)
119
+ promptlayer_exporter = PromptLayerSpanExporter(api_key=api_key)
120
+ span_processor = BatchSpanProcessor(promptlayer_exporter)
121
+ tracer_provider.add_span_processor(span_processor)
122
+ tracer = tracer_provider.get_tracer(__name__)
123
+ return tracer_provider, tracer
124
+ else:
125
+ return None, None
126
+
127
+ @staticmethod
128
+ def _prepare_get_prompt_template_params(
129
+ *, prompt_version, prompt_release_label, input_variables, metadata
130
+ ):
131
+ params = {}
132
+
133
+ if prompt_version:
134
+ params["version"] = prompt_version
135
+ if prompt_release_label:
136
+ params["label"] = prompt_release_label
137
+ if input_variables:
138
+ params["input_variables"] = input_variables
139
+ if metadata:
140
+ params["metadata_filters"] = metadata
141
+
142
+ return params
143
+
144
+ @staticmethod
145
+ def _prepare_llm_request_params(
146
+ *,
147
+ prompt_blueprint,
148
+ prompt_template,
149
+ prompt_blueprint_model,
150
+ model_parameter_overrides,
151
+ stream,
152
+ is_async=False,
153
+ ):
154
+ provider = prompt_blueprint_model["provider"]
155
+ kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
156
+ if is_async:
157
+ config = AMAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
158
+ request_function = AMAP_PROVIDER_TO_FUNCTION[provider]
159
+ else:
160
+ config = MAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
161
+ request_function = MAP_PROVIDER_TO_FUNCTION[provider]
162
+
163
+ if provider_base_url := prompt_blueprint.get("provider_base_url"):
164
+ kwargs["base_url"] = provider_base_url["url"]
165
+
166
+ if model_parameter_overrides:
167
+ kwargs.update(model_parameter_overrides)
168
+
169
+ kwargs["stream"] = stream
170
+ if stream and provider in ["openai", "openai.azure"]:
171
+ kwargs["stream_options"] = {"include_usage": True}
172
+
173
+ return {
174
+ "provider": provider,
175
+ "function_name": config["function_name"],
176
+ "stream_function": config["stream_function"],
177
+ "request_function": request_function,
178
+ "kwargs": kwargs,
179
+ "prompt_blueprint": prompt_blueprint,
180
+ }
181
+
182
+ @staticmethod
183
+ def _validate_and_extract_model_from_prompt_blueprint(
184
+ *, prompt_blueprint, prompt_name
185
+ ):
186
+ if not prompt_blueprint["llm_kwargs"]:
187
+ raise ValueError(
188
+ f"Prompt '{prompt_name}' does not have any LLM kwargs associated with it."
189
+ )
190
+
191
+ prompt_blueprint_metadata = prompt_blueprint.get("metadata")
192
+
193
+ if not prompt_blueprint_metadata:
194
+ raise ValueError(
195
+ f"Prompt '{prompt_name}' does not have any metadata associated with it."
196
+ )
197
+
198
+ prompt_blueprint_model = prompt_blueprint_metadata.get("model")
199
+
200
+ if not prompt_blueprint_model:
201
+ raise ValueError(
202
+ f"Prompt '{prompt_name}' does not have a model parameters associated with it."
203
+ )
204
+
205
+ return prompt_blueprint_model
206
+
207
+ @staticmethod
208
+ def _prepare_track_request_kwargs(
209
+ api_key,
210
+ request_params,
211
+ tags,
212
+ input_variables,
213
+ group_id,
214
+ pl_run_span_id: Union[str, None] = None,
215
+ metadata: Union[Dict[str, str], None] = None,
216
+ **body,
217
+ ):
218
+ return {
219
+ "function_name": request_params["function_name"],
220
+ "provider_type": request_params["provider"],
221
+ "args": [],
222
+ "kwargs": request_params["kwargs"],
223
+ "tags": tags,
224
+ "request_start_time": datetime.datetime.now(
225
+ datetime.timezone.utc
226
+ ).timestamp(),
227
+ "request_end_time": datetime.datetime.now(
228
+ datetime.timezone.utc
229
+ ).timestamp(),
230
+ "api_key": api_key,
231
+ "metadata": metadata,
232
+ "prompt_id": request_params["prompt_blueprint"]["id"],
233
+ "prompt_version": request_params["prompt_blueprint"]["version"],
234
+ "prompt_input_variables": input_variables,
235
+ "group_id": group_id,
236
+ "return_prompt_blueprint": True,
237
+ "span_id": pl_run_span_id,
238
+ **body,
239
+ }
240
+
241
+ def traceable(self, attributes=None, name=None):
242
+ def decorator(func):
243
+ @wraps(func)
244
+ def sync_wrapper(*args, **kwargs):
245
+ if self.tracer:
246
+ span_name = name or func.__name__
247
+ with self.tracer.start_as_current_span(span_name) as span:
248
+ if attributes:
249
+ for key, value in attributes.items():
250
+ span.set_attribute(key, value)
251
+
252
+ span.set_attribute(
253
+ "function_input", str({"args": args, "kwargs": kwargs})
254
+ )
255
+ result = func(*args, **kwargs)
256
+ span.set_attribute("function_output", str(result))
257
+
258
+ return result
259
+ else:
260
+ return func(*args, **kwargs)
261
+
262
+ @wraps(func)
263
+ async def async_wrapper(*args, **kwargs):
264
+ if self.tracer:
265
+ span_name = name or func.__name__
266
+ with self.tracer.start_as_current_span(span_name) as span:
267
+ if attributes:
268
+ for key, value in attributes.items():
269
+ span.set_attribute(key, value)
270
+
271
+ span.set_attribute(
272
+ "function_input", str({"args": args, "kwargs": kwargs})
273
+ )
274
+ result = await func(*args, **kwargs)
275
+ span.set_attribute("function_output", str(result))
276
+
277
+ return result
278
+ else:
279
+ return await func(*args, **kwargs)
280
+
281
+ return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
282
+
283
+ return decorator
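
The traceable decorator now lives on the mixin, so both clients can wrap arbitrary sync or async callables in a span; with tracing disabled it simply calls through. A hedged sketch, assuming the synchronous client also accepts enable_tracing (its full constructor signature is not shown in this diff):

```python
from promptlayer import PromptLayer

pl = PromptLayer(enable_tracing=True)  # assumption: sync client accepts this flag


@pl.traceable(name="summarize-step", attributes={"team": "docs"})
def summarize(text: str) -> str:
    # Placeholder application logic; inputs and outputs are recorded on the span.
    return text[:100]


summarize("PromptLayer 1.0.31 moves shared helpers into PromptLayerMixin.")
```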
promptlayer/utils.py CHANGED
@@ -2,13 +2,24 @@ import asyncio
  import contextvars
  import datetime
  import functools
+ import inspect
  import json
  import os
  import sys
  import types
  from copy import deepcopy
  from enum import Enum
- from typing import Any, Callable, Dict, Generator, List, Optional, Union
+ from typing import (
+ Any,
+ AsyncGenerator,
+ AsyncIterable,
+ Callable,
+ Dict,
+ Generator,
+ List,
+ Optional,
+ Union,
+ )
 
  import httpx
  import requests
@@ -311,6 +322,27 @@ def track_request(**body):
311
322
  return {}
312
323
 
313
324
 
325
+ async def atrack_request(**body: Any) -> Dict[str, Any]:
326
+ try:
327
+ async with httpx.AsyncClient() as client:
328
+ response = await client.post(
329
+ f"{URL_API_PROMPTLAYER}/track-request",
330
+ json=body,
331
+ )
332
+ if response.status_code != 200:
333
+ warn_on_bad_response(
334
+ response,
335
+ f"PromptLayer had the following error while tracking your request: {response.text}",
336
+ )
337
+ return response.json()
338
+ except httpx.RequestError as e:
339
+ print(
340
+ f"WARNING: While logging your request PromptLayer had the following error: {e}",
341
+ file=sys.stderr,
342
+ )
343
+ return {}
344
+
345
+
314
346
  def promptlayer_api_request_async(
315
347
  function_name,
316
348
  provider_type,
@@ -1136,6 +1168,81 @@ def openai_stream_chat(results: list):
1136
1168
  return response
1137
1169
 
1138
1170
 
1171
+ async def aopenai_stream_chat(generator: AsyncIterable[Any]) -> Any:
1172
+ from openai.types.chat import (
1173
+ ChatCompletion,
1174
+ ChatCompletionChunk,
1175
+ ChatCompletionMessage,
1176
+ ChatCompletionMessageToolCall,
1177
+ )
1178
+ from openai.types.chat.chat_completion import Choice
1179
+ from openai.types.chat.chat_completion_message_tool_call import Function
1180
+
1181
+ chat_completion_chunks: List[ChatCompletionChunk] = []
1182
+ response: ChatCompletion = ChatCompletion(
1183
+ id="",
1184
+ object="chat.completion",
1185
+ choices=[
1186
+ Choice(
1187
+ finish_reason="stop",
1188
+ index=0,
1189
+ message=ChatCompletionMessage(role="assistant"),
1190
+ )
1191
+ ],
1192
+ created=0,
1193
+ model="",
1194
+ )
1195
+ content = ""
1196
+ tool_calls: Union[List[ChatCompletionMessageToolCall], None] = None
1197
+
1198
+ async for result in generator:
1199
+ chat_completion_chunks.append(result)
1200
+ choices = result.choices
1201
+ if len(choices) == 0:
1202
+ continue
1203
+ if choices[0].delta.content:
1204
+ content = f"{content}{choices[0].delta.content}"
1205
+
1206
+ delta = choices[0].delta
1207
+ if delta.tool_calls:
1208
+ tool_calls = tool_calls or []
1209
+ last_tool_call = None
1210
+ if len(tool_calls) > 0:
1211
+ last_tool_call = tool_calls[-1]
1212
+ tool_call = delta.tool_calls[0]
1213
+ if not tool_call.function:
1214
+ continue
1215
+ if not last_tool_call or tool_call.id:
1216
+ tool_calls.append(
1217
+ ChatCompletionMessageToolCall(
1218
+ id=tool_call.id or "",
1219
+ function=Function(
1220
+ name=tool_call.function.name or "",
1221
+ arguments=tool_call.function.arguments or "",
1222
+ ),
1223
+ type=tool_call.type or "function",
1224
+ )
1225
+ )
1226
+ continue
1227
+ last_tool_call.function.name = (
1228
+ f"{last_tool_call.function.name}{tool_call.function.name or ''}"
1229
+ )
1230
+ last_tool_call.function.arguments = f"{last_tool_call.function.arguments}{tool_call.function.arguments or ''}"
1231
+
1232
+ # After collecting all chunks, set the response attributes
1233
+ if chat_completion_chunks:
1234
+ last_result = chat_completion_chunks[-1]
1235
+ response.id = last_result.id
1236
+ response.created = last_result.created
1237
+ response.model = last_result.model
1238
+ response.system_fingerprint = getattr(last_result, "system_fingerprint", None)
1239
+ response.usage = last_result.usage
1240
+
1241
+ response.choices[0].message.content = content
1242
+ response.choices[0].message.tool_calls = tool_calls
1243
+ return response
1244
+
1245
+
1139
1246
  def openai_stream_completion(results: list):
1140
1247
  from openai.types.completion import Completion, CompletionChoice
1141
1248
 
@@ -1162,6 +1269,41 @@ def openai_stream_completion(results: list):
1162
1269
  return response
1163
1270
 
1164
1271
 
1272
+ async def aopenai_stream_completion(generator: AsyncIterable[Any]) -> Any:
1273
+ from openai.types.completion import Completion, CompletionChoice
1274
+
1275
+ completions: List[Completion] = []
1276
+ text = ""
1277
+ response = Completion(
1278
+ id="",
1279
+ created=0,
1280
+ model="",
1281
+ object="text_completion",
1282
+ choices=[CompletionChoice(finish_reason="stop", index=0, text="")],
1283
+ )
1284
+
1285
+ async for completion in generator:
1286
+ completions.append(completion)
1287
+ usage = completion.usage
1288
+ system_fingerprint = getattr(completion, "system_fingerprint", None)
1289
+ if len(completion.choices) > 0 and completion.choices[0].text:
1290
+ text = f"{text}{completion.choices[0].text}"
1291
+ if usage:
1292
+ response.usage = usage
1293
+ if system_fingerprint:
1294
+ response.system_fingerprint = system_fingerprint
1295
+
1296
+ # After collecting all completions, set the response attributes
1297
+ if completions:
1298
+ last_chunk = completions[-1]
1299
+ response.id = last_chunk.id
1300
+ response.created = last_chunk.created
1301
+ response.model = last_chunk.model
1302
+
1303
+ response.choices[0].text = text
1304
+ return response
1305
+
1306
+
1165
1307
  def anthropic_stream_message(results: list):
1166
1308
  from anthropic.types import Message, MessageStreamEvent, TextBlock, Usage
1167
1309
 
@@ -1192,6 +1334,39 @@ def anthropic_stream_message(results: list):
1192
1334
  return response
1193
1335
 
1194
1336
 
1337
+ async def aanthropic_stream_message(generator: AsyncIterable[Any]) -> Any:
1338
+ from anthropic.types import Message, MessageStreamEvent, TextBlock, Usage
1339
+
1340
+ message_stream_events: List[MessageStreamEvent] = []
1341
+ response: Message = Message(
1342
+ id="",
1343
+ model="",
1344
+ content=[],
1345
+ role="assistant",
1346
+ type="message",
1347
+ stop_reason="stop_sequence",
1348
+ stop_sequence=None,
1349
+ usage=Usage(input_tokens=0, output_tokens=0),
1350
+ )
1351
+ content = ""
1352
+
1353
+ async for result in generator:
1354
+ message_stream_events.append(result)
1355
+ if result.type == "message_start":
1356
+ response = result.message
1357
+ elif result.type == "content_block_delta":
1358
+ if result.delta.type == "text_delta":
1359
+ content = f"{content}{result.delta.text}"
1360
+ elif result.type == "message_delta":
1361
+ if hasattr(result, "usage"):
1362
+ response.usage.output_tokens = result.usage.output_tokens
1363
+ if hasattr(result.delta, "stop_reason"):
1364
+ response.stop_reason = result.delta.stop_reason
1365
+
1366
+ response.content.append(TextBlock(type="text", text=content))
1367
+ return response
1368
+
1369
+
1195
1370
  def anthropic_stream_completion(results: list):
1196
1371
  from anthropic.types import Completion
1197
1372
 
@@ -1212,6 +1387,33 @@ def anthropic_stream_completion(results: list):
1212
1387
  return response
1213
1388
 
1214
1389
 
1390
+ async def aanthropic_stream_completion(generator: AsyncIterable[Any]) -> Any:
1391
+ from anthropic.types import Completion
1392
+
1393
+ completions: List[Completion] = []
1394
+ text = ""
1395
+ response = Completion(
1396
+ id="",
1397
+ completion="",
1398
+ model="",
1399
+ stop_reason="stop",
1400
+ type="completion",
1401
+ )
1402
+
1403
+ async for completion in generator:
1404
+ completions.append(completion)
1405
+ text = f"{text}{completion.completion}"
1406
+
1407
+ # After collecting all completions, set the response attributes
1408
+ if completions:
1409
+ last_chunk = completions[-1]
1410
+ response.id = last_chunk.id
1411
+ response.model = last_chunk.model
1412
+
1413
+ response.completion = text
1414
+ return response
1415
+
1416
+
1215
1417
  def stream_response(
1216
1418
  generator: Generator, after_stream: Callable, map_results: Callable
1217
1419
  ):
@@ -1232,6 +1434,33 @@ def stream_response(
1232
1434
  yield data
1233
1435
 
1234
1436
 
1437
+ async def astream_response(
1438
+ generator: AsyncIterable[Any],
1439
+ after_stream: Callable[..., Any],
1440
+ map_results: Callable[[Any], Any],
1441
+ ) -> AsyncGenerator[Dict[str, Any], None]:
1442
+ data = {
1443
+ "request_id": None,
1444
+ "raw_response": None,
1445
+ "prompt_blueprint": None,
1446
+ }
1447
+ results = []
1448
+ async for result in generator:
1449
+ results.append(result)
1450
+ data["raw_response"] = result
1451
+ yield data
1452
+ request_response = await map_results(results)
1453
+ if inspect.iscoroutinefunction(after_stream):
1454
+ # after_stream is an async function
1455
+ response = await after_stream(request_response=request_response.model_dump())
1456
+ else:
1457
+ # after_stream is synchronous
1458
+ response = after_stream(request_response=request_response.model_dump())
1459
+ data["request_id"] = response.get("request_id")
1460
+ data["prompt_blueprint"] = response.get("prompt_blueprint")
1461
+ yield data
1462
+
1463
+
1235
1464
  def openai_chat_request(client, **kwargs):
1236
1465
  return client.chat.completions.create(**kwargs)
1237
1466
 
@@ -1256,6 +1485,30 @@ def openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
1256
1485
  return request_to_make(client, **kwargs)
1257
1486
 
1258
1487
 
1488
+ async def aopenai_chat_request(client, **kwargs):
1489
+ return await client.chat.completions.create(**kwargs)
1490
+
1491
+
1492
+ async def aopenai_completions_request(client, **kwargs):
1493
+ return await client.completions.create(**kwargs)
1494
+
1495
+
1496
+ AMAP_TYPE_TO_OPENAI_FUNCTION = {
1497
+ "chat": aopenai_chat_request,
1498
+ "completion": aopenai_completions_request,
1499
+ }
1500
+
1501
+
1502
+ async def aopenai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
1503
+ from openai import AsyncOpenAI
1504
+
1505
+ client = AsyncOpenAI(base_url=kwargs.pop("base_url", None))
1506
+ request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[
1507
+ prompt_blueprint["prompt_template"]["type"]
1508
+ ]
1509
+ return await request_to_make(client, **kwargs)
1510
+
1511
+
1259
1512
  def azure_openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
1260
1513
  from openai import AzureOpenAI
1261
1514
 
@@ -1266,6 +1519,16 @@ def azure_openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
1266
1519
  return request_to_make(client, **kwargs)
1267
1520
 
1268
1521
 
1522
+ async def aazure_openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
1523
+ from openai import AsyncAzureOpenAI
1524
+
1525
+ client = AsyncAzureOpenAI(azure_endpoint=kwargs.pop("base_url", None))
1526
+ request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[
1527
+ prompt_blueprint["prompt_template"]["type"]
1528
+ ]
1529
+ return await request_to_make(client, **kwargs)
1530
+
1531
+
1269
1532
  def anthropic_chat_request(client, **kwargs):
1270
1533
  return client.messages.create(**kwargs)
1271
1534
 
@@ -1290,6 +1553,30 @@ def anthropic_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
1290
1553
  return request_to_make(client, **kwargs)
1291
1554
 
1292
1555
 
1556
+ async def aanthropic_chat_request(client, **kwargs):
1557
+ return await client.messages.create(**kwargs)
1558
+
1559
+
1560
+ async def aanthropic_completions_request(client, **kwargs):
1561
+ return await client.completions.create(**kwargs)
1562
+
1563
+
1564
+ AMAP_TYPE_TO_ANTHROPIC_FUNCTION = {
1565
+ "chat": aanthropic_chat_request,
1566
+ "completion": aanthropic_completions_request,
1567
+ }
1568
+
1569
+
1570
+ async def aanthropic_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
1571
+ from anthropic import AsyncAnthropic
1572
+
1573
+ client = AsyncAnthropic(base_url=kwargs.pop("base_url", None))
1574
+ request_to_make = AMAP_TYPE_TO_ANTHROPIC_FUNCTION[
1575
+ prompt_blueprint["prompt_template"]["type"]
1576
+ ]
1577
+ return await request_to_make(client, **kwargs)
1578
+
1579
+
1293
1580
  # do not remove! This is used in the langchain integration.
1294
1581
  def get_api_key():
1295
1582
  # raise an error if the api key is not set
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: promptlayer
- Version: 1.0.29
+ Version: 1.0.31
  Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
  License: Apache-2.0
  Author: Magniv
@@ -1,8 +1,9 @@
- promptlayer/__init__.py,sha256=v0SGSQb6xyDYZEA6IFcOSWw9FK_WUCe8OaEc1ZJ1N9M,140
+ promptlayer/__init__.py,sha256=R08U9XPE7no3b74m1e1etSNqml_wwXcQKWv2RwzX668,140
  promptlayer/groups/__init__.py,sha256=xhOAolLUBkr76ZHvJr29OwjCIk1V9qKQXjZCuyTJUIY,429
  promptlayer/groups/groups.py,sha256=YPROicy-TzpkrpA8vOpZS2lwvJ6VRtlbQ1S2oT1N0vM,338
- promptlayer/promptlayer.py,sha256=3M6kBCJPAejUaWE-NhynOWe1Ml6VPXxCVh3OlME4WUA,20879
+ promptlayer/promptlayer.py,sha256=ZzHLMwbF-qq9SXNb610tmAOnGzOEvugGjHp3MtGWMGA,20083
  promptlayer/promptlayer_base.py,sha256=sev-EZehRXJSZSmJtMkqmAUK1345pqbDY_lNjPP5MYA,7158
+ promptlayer/promptlayer_mixins.py,sha256=CZRX-kjd067JLzxd0qOWBnOCHdC5CZ4bXFDs7CDMplg,9572
  promptlayer/span_exporter.py,sha256=zIJNsb3Fe6yb5wKLDmkoPF2wqFjk1p39E0jWHD2plzI,2658
  promptlayer/templates.py,sha256=bdX8ZxydWwF9QMF1UBD-qoYqYRPrUSTAt88r2D8ws7c,1193
  promptlayer/track/__init__.py,sha256=8J258daTXb_P8eHRbYR2Au1lJzTh_92UkOHf7q0NpKs,1757
@@ -10,8 +11,8 @@ promptlayer/track/track.py,sha256=UdkCxhWUvhvPdhsoHj4qmeiRq6xLcWmeIdYXrgZph04,32
  promptlayer/types/__init__.py,sha256=xJcvQuOk91ZBBePb40-1FDNDKYrZoH5lPE2q6_UhprM,111
  promptlayer/types/prompt_template.py,sha256=TUXLXvuvew0EBLfTMBa2LhFeQoF7R-tcFKg7_UUtHMQ,4433
  promptlayer/types/request_log.py,sha256=xU6bcxQar6GaBOJlgZTavXUV3FjE8sF_nSjPu4Ya_00,174
- promptlayer/utils.py,sha256=s_7XMGRjuqTJjPDArixBahsGVlO7xcerxgcVijd12BQ,44690
- promptlayer-1.0.29.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- promptlayer-1.0.29.dist-info/METADATA,sha256=Sh2YeNNgTxoPUG1jVh6z4M4vCV6zrSssU-PERG1XnSc,4824
- promptlayer-1.0.29.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
- promptlayer-1.0.29.dist-info/RECORD,,
+ promptlayer/utils.py,sha256=yPEzt3JZT7rbVch39Gv8Uk0-CR8Ih5Ym9iIfS0VEEH8,53960
+ promptlayer-1.0.31.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ promptlayer-1.0.31.dist-info/METADATA,sha256=G6f6UFUFwgrmLIOwO46z3tIh99Rnjk045A34G8IrV8Y,4824
+ promptlayer-1.0.31.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+ promptlayer-1.0.31.dist-info/RECORD,,