promptlayer 1.0.30__py3-none-any.whl → 1.0.32__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of promptlayer might be problematic.
- promptlayer/__init__.py +1 -1
- promptlayer/promptlayer.py +193 -224
- promptlayer/promptlayer_mixins.py +283 -0
- promptlayer/types/prompt_template.py +17 -1
- promptlayer/utils.py +288 -1
- {promptlayer-1.0.30.dist-info → promptlayer-1.0.32.dist-info}/METADATA +1 -1
- {promptlayer-1.0.30.dist-info → promptlayer-1.0.32.dist-info}/RECORD +9 -8
- {promptlayer-1.0.30.dist-info → promptlayer-1.0.32.dist-info}/LICENSE +0 -0
- {promptlayer-1.0.30.dist-info → promptlayer-1.0.32.dist-info}/WHEEL +0 -0
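The functional change in this release is that the provider-dispatch tables, tracer setup, and the traceable decorator move out of promptlayer/promptlayer.py into a new shared PromptLayerMixin, while AsyncPromptLayer gains tracing support plus a native async run() with streaming. A minimal usage sketch inferred from the diff below; the prompt name and input variable are hypothetical, and it assumes PROMPTLAYER_API_KEY is set in the environment:

    import asyncio
    from promptlayer.promptlayer import AsyncPromptLayer

    async def main():
        # enable_tracing is new on AsyncPromptLayer in this release
        pl = AsyncPromptLayer(enable_tracing=True)
        result = await pl.run("welcome-email", input_variables={"name": "Ada"})
        print(result["request_id"], result["prompt_blueprint"])

    asyncio.run(main())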
promptlayer/__init__.py
CHANGED
promptlayer/promptlayer.py
CHANGED
@@ -1,78 +1,27 @@
 import asyncio
-import datetime
 import os
-from copy import deepcopy
-from functools import wraps
 from typing import Any, Dict, List, Literal, Optional, Union

 import nest_asyncio
-from opentelemetry.sdk.resources import Resource
-from opentelemetry.sdk.trace import TracerProvider
-from opentelemetry.sdk.trace.export import BatchSpanProcessor
-from opentelemetry.semconv.resource import ResourceAttributes

 from promptlayer.groups import AsyncGroupManager, GroupManager
 from promptlayer.promptlayer_base import PromptLayerBase
-from promptlayer.
+from promptlayer.promptlayer_mixins import PromptLayerMixin
 from promptlayer.templates import AsyncTemplateManager, TemplateManager
 from promptlayer.track import AsyncTrackManager, TrackManager
 from promptlayer.types.prompt_template import PromptTemplate
 from promptlayer.utils import (
-    anthropic_request,
-    anthropic_stream_completion,
-    anthropic_stream_message,
     arun_workflow_request,
+    astream_response,
+    atrack_request,
     autil_log_request,
-    azure_openai_request,
-    openai_request,
-    openai_stream_chat,
-    openai_stream_completion,
     stream_response,
     track_request,
     util_log_request,
 )

-MAP_PROVIDER_TO_FUNCTION_NAME = {
-    "openai": {
-        "chat": {
-            "function_name": "openai.chat.completions.create",
-            "stream_function": openai_stream_chat,
-        },
-        "completion": {
-            "function_name": "openai.completions.create",
-            "stream_function": openai_stream_completion,
-        },
-    },
-    "anthropic": {
-        "chat": {
-            "function_name": "anthropic.messages.create",
-            "stream_function": anthropic_stream_message,
-        },
-        "completion": {
-            "function_name": "anthropic.completions.create",
-            "stream_function": anthropic_stream_completion,
-        },
-    },
-    "openai.azure": {
-        "chat": {
-            "function_name": "openai.AzureOpenAI.chat.completions.create",
-            "stream_function": openai_stream_chat,
-        },
-        "completion": {
-            "function_name": "openai.AzureOpenAI.completions.create",
-            "stream_function": openai_stream_completion,
-        },
-    },
-}
-
-MAP_PROVIDER_TO_FUNCTION = {
-    "openai": openai_request,
-    "anthropic": anthropic_request,
-    "openai.azure": azure_openai_request,
-}
-
-
-class PromptLayer:
+
+class PromptLayer(PromptLayerMixin):
     def __init__(
         self,
         api_key: str = None,
@@ -134,109 +83,18 @@ class PromptLayer:
     ):
         def _track_request(**body):
             track_request_kwargs = self._prepare_track_request_kwargs(
-
+                self.api_key,
+                request_params,
+                tags,
+                input_variables,
+                group_id,
+                pl_run_span_id,
+                **body,
             )
             return track_request(**track_request_kwargs)

         return _track_request

-    @staticmethod
-    def _initialize_tracer(api_key: str = None, enable_tracing: bool = False):
-        if enable_tracing:
-            resource = Resource(
-                attributes={ResourceAttributes.SERVICE_NAME: "prompt-layer-library"}
-            )
-            tracer_provider = TracerProvider(resource=resource)
-            promptlayer_exporter = PromptLayerSpanExporter(api_key=api_key)
-            span_processor = BatchSpanProcessor(promptlayer_exporter)
-            tracer_provider.add_span_processor(span_processor)
-            tracer = tracer_provider.get_tracer(__name__)
-            return tracer_provider, tracer
-        else:
-            return None, None
-
-    @staticmethod
-    def _prepare_get_prompt_template_params(
-        *, prompt_version, prompt_release_label, input_variables, metadata
-    ):
-        params = {}
-
-        if prompt_version:
-            params["version"] = prompt_version
-        if prompt_release_label:
-            params["label"] = prompt_release_label
-        if input_variables:
-            params["input_variables"] = input_variables
-        if metadata:
-            params["metadata_filters"] = metadata
-
-        return params
-
-    @staticmethod
-    def _prepare_llm_request_params(
-        *,
-        prompt_blueprint,
-        prompt_template,
-        prompt_blueprint_model,
-        model_parameter_overrides,
-        stream,
-    ):
-        provider = prompt_blueprint_model["provider"]
-        kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
-        config = MAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
-
-        if provider_base_url := prompt_blueprint.get("provider_base_url"):
-            kwargs["base_url"] = provider_base_url["url"]
-
-        if model_parameter_overrides:
-            kwargs.update(model_parameter_overrides)
-
-        kwargs["stream"] = stream
-        if stream and provider in ["openai", "openai.azure"]:
-            kwargs["stream_options"] = {"include_usage": True}
-
-        return {
-            "provider": provider,
-            "function_name": config["function_name"],
-            "stream_function": config["stream_function"],
-            "request_function": MAP_PROVIDER_TO_FUNCTION[provider],
-            "kwargs": kwargs,
-            "prompt_blueprint": prompt_blueprint,
-        }
-
-    def _prepare_track_request_kwargs(
-        self,
-        request_params,
-        tags,
-        input_variables,
-        group_id,
-        pl_run_span_id: Union[str, None] = None,
-        metadata: Union[Dict[str, str], None] = None,
-        **body,
-    ):
-        return {
-            "function_name": request_params["function_name"],
-            "provider_type": request_params["provider"],
-            "args": [],
-            "kwargs": request_params["kwargs"],
-            "tags": tags,
-            "request_start_time": datetime.datetime.now(
-                datetime.timezone.utc
-            ).timestamp(),
-            "request_end_time": datetime.datetime.now(
-                datetime.timezone.utc
-            ).timestamp(),
-            "api_key": self.api_key,
-            "metadata": metadata,
-            "prompt_id": request_params["prompt_blueprint"]["id"],
-            "prompt_version": request_params["prompt_blueprint"]["version"],
-            "prompt_input_variables": input_variables,
-            "group_id": group_id,
-            "return_prompt_blueprint": True,
-            "span_id": pl_run_span_id,
-            **body,
-        }
-
     def _run_internal(
         self,
         *,
@@ -313,6 +171,7 @@ class PromptLayer:
         **body,
     ):
         track_request_kwargs = self._prepare_track_request_kwargs(
+            self.api_key,
             request_params,
             tags,
             input_variables,
@@ -323,31 +182,6 @@ class PromptLayer:
         )
         return track_request(**track_request_kwargs)

-    @staticmethod
-    def _validate_and_extract_model_from_prompt_blueprint(
-        *, prompt_blueprint, prompt_name
-    ):
-        if not prompt_blueprint["llm_kwargs"]:
-            raise ValueError(
-                f"Prompt '{prompt_name}' does not have any LLM kwargs associated with it."
-            )
-
-        prompt_blueprint_metadata = prompt_blueprint.get("metadata")
-
-        if not prompt_blueprint_metadata:
-            raise ValueError(
-                f"Prompt '{prompt_name}' does not have any metadata associated with it."
-            )
-
-        prompt_blueprint_model = prompt_blueprint_metadata.get("model")
-
-        if not prompt_blueprint_model:
-            raise ValueError(
-                f"Prompt '{prompt_name}' does not have a model parameters associated with it."
-            )
-
-        return prompt_blueprint_model
-
     def run(
         self,
         prompt_name: str,
@@ -433,50 +267,6 @@ class PromptLayer:
         except Exception as e:
             raise Exception(f"Error running workflow: {str(e)}")

-    def traceable(self, attributes=None, name=None):
-        def decorator(func):
-            @wraps(func)
-            def sync_wrapper(*args, **kwargs):
-                if self.tracer:
-                    span_name = name or func.__name__
-                    with self.tracer.start_as_current_span(span_name) as span:
-                        if attributes:
-                            for key, value in attributes.items():
-                                span.set_attribute(key, value)
-
-                        span.set_attribute(
-                            "function_input", str({"args": args, "kwargs": kwargs})
-                        )
-                        result = func(*args, **kwargs)
-                        span.set_attribute("function_output", str(result))
-
-                        return result
-                else:
-                    return func(*args, **kwargs)
-
-            @wraps(func)
-            async def async_wrapper(*args, **kwargs):
-                if self.tracer:
-                    span_name = name or func.__name__
-                    with self.tracer.start_as_current_span(span_name) as span:
-                        if attributes:
-                            for key, value in attributes.items():
-                                span.set_attribute(key, value)
-
-                        span.set_attribute(
-                            "function_input", str({"args": args, "kwargs": kwargs})
-                        )
-                        result = await func(*args, **kwargs)
-                        span.set_attribute("function_output", str(result))
-
-                        return result
-                else:
-                    return await func(*args, **kwargs)
-
-            return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
-
-        return decorator
-
     def log_request(
         self,
         *,
@@ -520,10 +310,11 @@ class PromptLayer:
         )


-class AsyncPromptLayer:
+class AsyncPromptLayer(PromptLayerMixin):
     def __init__(
         self,
         api_key: str = None,
+        enable_tracing: bool = False,
     ):
         if api_key is None:
             api_key = os.environ.get("PROMPTLAYER_API_KEY")
@@ -537,8 +328,36 @@ class AsyncPromptLayer:
         self.api_key = api_key
         self.templates = AsyncTemplateManager(api_key)
         self.group = AsyncGroupManager(api_key)
+        self.tracer_provider, self.tracer = self._initialize_tracer(
+            api_key, enable_tracing
+        )
         self.track = AsyncTrackManager(api_key)

+    def __getattr__(
+        self, name: Union[Literal["openai"], Literal["anthropic"], Literal["prompts"]]
+    ):
+        if name == "openai":
+            import openai as openai_module
+
+            openai = PromptLayerBase(
+                openai_module,
+                function_name="openai",
+                api_key=self.api_key,
+            )
+            return openai
+        elif name == "anthropic":
+            import anthropic as anthropic_module
+
+            anthropic = PromptLayerBase(
+                anthropic_module,
+                function_name="anthropic",
+                provider_type="anthropic",
+                api_key=self.api_key,
+            )
+            return anthropic
+        else:
+            raise AttributeError(f"module {__name__} has no attribute {name}")
+
     async def run_workflow(
         self,
         workflow_name: str,
@@ -564,6 +383,43 @@ class AsyncPromptLayer:
         except Exception as e:
             raise Exception(f"Error running workflow: {str(e)}")

+    async def run(
+        self,
+        prompt_name: str,
+        prompt_version: Union[int, None] = None,
+        prompt_release_label: Union[str, None] = None,
+        input_variables: Union[Dict[str, Any], None] = None,
+        model_parameter_overrides: Union[Dict[str, Any], None] = None,
+        tags: Union[List[str], None] = None,
+        metadata: Union[Dict[str, str], None] = None,
+        group_id: Union[int, None] = None,
+        stream: bool = False,
+    ) -> Dict[str, Any]:
+        _run_internal_kwargs = {
+            "prompt_name": prompt_name,
+            "prompt_version": prompt_version,
+            "prompt_release_label": prompt_release_label,
+            "input_variables": input_variables,
+            "model_parameter_overrides": model_parameter_overrides,
+            "tags": tags,
+            "metadata": metadata,
+            "group_id": group_id,
+            "stream": stream,
+        }
+
+        if self.tracer:
+            with self.tracer.start_as_current_span("PromptLayer Run") as span:
+                span.set_attribute("prompt_name", prompt_name)
+                span.set_attribute("function_input", str(_run_internal_kwargs))
+                pl_run_span_id = hex(span.context.span_id)[2:].zfill(16)
+                result = await self._run_internal(
+                    **_run_internal_kwargs, pl_run_span_id=pl_run_span_id
+                )
+                span.set_attribute("function_output", str(result))
+                return result
+        else:
+            return await self._run_internal(**_run_internal_kwargs)
+
     async def log_request(
         self,
         *,
@@ -605,3 +461,116 @@ class AsyncPromptLayer:
             function_name=function_name,
             score=score,
         )
+
+    async def _create_track_request_callable(
+        self,
+        *,
+        request_params,
+        tags,
+        input_variables,
+        group_id,
+        pl_run_span_id: Union[str, None] = None,
+    ):
+        async def _track_request(**body):
+            track_request_kwargs = self._prepare_track_request_kwargs(
+                self.api_key,
+                request_params,
+                tags,
+                input_variables,
+                group_id,
+                pl_run_span_id,
+                **body,
+            )
+            return await atrack_request(**track_request_kwargs)
+
+        return await _track_request
+
+    async def _track_request_log(
+        self,
+        request_params,
+        tags,
+        input_variables,
+        group_id,
+        pl_run_span_id: Union[str, None] = None,
+        metadata: Union[Dict[str, str], None] = None,
+        **body,
+    ):
+        track_request_kwargs = self._prepare_track_request_kwargs(
+            self.api_key,
+            request_params,
+            tags,
+            input_variables,
+            group_id,
+            pl_run_span_id,
+            metadata=metadata,
+            **body,
+        )
+        return await atrack_request(**track_request_kwargs)
+
+    async def _run_internal(
+        self,
+        *,
+        prompt_name: str,
+        prompt_version: Union[int, None] = None,
+        prompt_release_label: Union[str, None] = None,
+        input_variables: Union[Dict[str, Any], None] = None,
+        model_parameter_overrides: Union[Dict[str, Any], None] = None,
+        tags: Union[List[str], None] = None,
+        metadata: Union[Dict[str, str], None] = None,
+        group_id: Union[int, None] = None,
+        stream: bool = False,
+        pl_run_span_id: Union[str, None] = None,
+    ) -> Dict[str, Any]:
+        get_prompt_template_params = self._prepare_get_prompt_template_params(
+            prompt_version=prompt_version,
+            prompt_release_label=prompt_release_label,
+            input_variables=input_variables,
+            metadata=metadata,
+        )
+        prompt_blueprint = await self.templates.get(
+            prompt_name, get_prompt_template_params
+        )
+        prompt_blueprint_model = self._validate_and_extract_model_from_prompt_blueprint(
+            prompt_blueprint=prompt_blueprint, prompt_name=prompt_name
+        )
+        llm_request_params = self._prepare_llm_request_params(
+            prompt_blueprint=prompt_blueprint,
+            prompt_template=prompt_blueprint["prompt_template"],
+            prompt_blueprint_model=prompt_blueprint_model,
+            model_parameter_overrides=model_parameter_overrides,
+            stream=stream,
+            is_async=True,
+        )
+
+        response = await llm_request_params["request_function"](
+            llm_request_params["prompt_blueprint"], **llm_request_params["kwargs"]
+        )
+
+        if stream:
+            return astream_response(
+                response,
+                self._create_track_request_callable(
+                    request_params=llm_request_params,
+                    tags=tags,
+                    input_variables=input_variables,
+                    group_id=group_id,
+                    pl_run_span_id=pl_run_span_id,
+                ),
+                llm_request_params["stream_function"],
+            )
+
+        request_log = await self._track_request_log(
+            llm_request_params,
+            tags,
+            input_variables,
+            group_id,
+            pl_run_span_id,
+            metadata=metadata,
+            request_response=response.model_dump(),
+        )
+
+        return {
+            "request_id": request_log.get("request_id", None),
+            "raw_response": response,
+            "prompt_blueprint": request_log.get("prompt_blueprint", None),
+        }
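With stream=True, the new async run() returns the async generator produced by astream_response instead of a final dict: each yielded item carries the latest raw provider chunk, and request_id / prompt_blueprint are only filled in on the last item once the request has been tracked. A hedged consumption sketch; the prompt name is made up:

    async def stream_demo(pl):
        stream = await pl.run("welcome-email", input_variables={"name": "Ada"}, stream=True)
        last = None
        async for data in stream:
            last = data  # data["raw_response"] is the most recent chunk
        # populated only on the final yield, after tracking completes
        print(last["request_id"], last["prompt_blueprint"])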
promptlayer/promptlayer_mixins.py
ADDED
@@ -0,0 +1,283 @@
+import asyncio
+import datetime
+from copy import deepcopy
+from functools import wraps
+from typing import Dict, Union
+
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.semconv.resource import ResourceAttributes
+
+from promptlayer.span_exporter import PromptLayerSpanExporter
+from promptlayer.utils import (
+    aanthropic_request,
+    aanthropic_stream_completion,
+    aanthropic_stream_message,
+    aazure_openai_request,
+    anthropic_request,
+    anthropic_stream_completion,
+    anthropic_stream_message,
+    aopenai_request,
+    aopenai_stream_chat,
+    aopenai_stream_completion,
+    azure_openai_request,
+    openai_request,
+    openai_stream_chat,
+    openai_stream_completion,
+)
+
+MAP_PROVIDER_TO_FUNCTION_NAME = {
+    "openai": {
+        "chat": {
+            "function_name": "openai.chat.completions.create",
+            "stream_function": openai_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.completions.create",
+            "stream_function": openai_stream_completion,
+        },
+    },
+    "anthropic": {
+        "chat": {
+            "function_name": "anthropic.messages.create",
+            "stream_function": anthropic_stream_message,
+        },
+        "completion": {
+            "function_name": "anthropic.completions.create",
+            "stream_function": anthropic_stream_completion,
+        },
+    },
+    "openai.azure": {
+        "chat": {
+            "function_name": "openai.AzureOpenAI.chat.completions.create",
+            "stream_function": openai_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.AzureOpenAI.completions.create",
+            "stream_function": openai_stream_completion,
+        },
+    },
+}
+
+
+MAP_PROVIDER_TO_FUNCTION = {
+    "openai": openai_request,
+    "anthropic": anthropic_request,
+    "openai.azure": azure_openai_request,
+}
+
+AMAP_PROVIDER_TO_FUNCTION_NAME = {
+    "openai": {
+        "chat": {
+            "function_name": "openai.chat.completions.create",
+            "stream_function": aopenai_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.completions.create",
+            "stream_function": aopenai_stream_completion,
+        },
+    },
+    "anthropic": {
+        "chat": {
+            "function_name": "anthropic.messages.create",
+            "stream_function": aanthropic_stream_message,
+        },
+        "completion": {
+            "function_name": "anthropic.completions.create",
+            "stream_function": aanthropic_stream_completion,
+        },
+    },
+    "openai.azure": {
+        "chat": {
+            "function_name": "openai.AzureOpenAI.chat.completions.create",
+            "stream_function": aopenai_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.AzureOpenAI.completions.create",
+            "stream_function": aopenai_stream_completion,
+        },
+    },
+}
+
+
+AMAP_PROVIDER_TO_FUNCTION = {
+    "openai": aopenai_request,
+    "anthropic": aanthropic_request,
+    "openai.azure": aazure_openai_request,
+}
+
+
+class PromptLayerMixin:
+    @staticmethod
+    def _initialize_tracer(api_key: str = None, enable_tracing: bool = False):
+        if enable_tracing:
+            resource = Resource(
+                attributes={ResourceAttributes.SERVICE_NAME: "prompt-layer-library"}
+            )
+            tracer_provider = TracerProvider(resource=resource)
+            promptlayer_exporter = PromptLayerSpanExporter(api_key=api_key)
+            span_processor = BatchSpanProcessor(promptlayer_exporter)
+            tracer_provider.add_span_processor(span_processor)
+            tracer = tracer_provider.get_tracer(__name__)
+            return tracer_provider, tracer
+        else:
+            return None, None
+
+    @staticmethod
+    def _prepare_get_prompt_template_params(
+        *, prompt_version, prompt_release_label, input_variables, metadata
+    ):
+        params = {}
+
+        if prompt_version:
+            params["version"] = prompt_version
+        if prompt_release_label:
+            params["label"] = prompt_release_label
+        if input_variables:
+            params["input_variables"] = input_variables
+        if metadata:
+            params["metadata_filters"] = metadata
+
+        return params
+
+    @staticmethod
+    def _prepare_llm_request_params(
+        *,
+        prompt_blueprint,
+        prompt_template,
+        prompt_blueprint_model,
+        model_parameter_overrides,
+        stream,
+        is_async=False,
+    ):
+        provider = prompt_blueprint_model["provider"]
+        kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
+        if is_async:
+            config = AMAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
+            request_function = AMAP_PROVIDER_TO_FUNCTION[provider]
+        else:
+            config = MAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
+            request_function = MAP_PROVIDER_TO_FUNCTION[provider]
+
+        if provider_base_url := prompt_blueprint.get("provider_base_url"):
+            kwargs["base_url"] = provider_base_url["url"]
+
+        if model_parameter_overrides:
+            kwargs.update(model_parameter_overrides)
+
+        kwargs["stream"] = stream
+        if stream and provider in ["openai", "openai.azure"]:
+            kwargs["stream_options"] = {"include_usage": True}
+
+        return {
+            "provider": provider,
+            "function_name": config["function_name"],
+            "stream_function": config["stream_function"],
+            "request_function": request_function,
+            "kwargs": kwargs,
+            "prompt_blueprint": prompt_blueprint,
+        }
+
+    @staticmethod
+    def _validate_and_extract_model_from_prompt_blueprint(
+        *, prompt_blueprint, prompt_name
+    ):
+        if not prompt_blueprint["llm_kwargs"]:
+            raise ValueError(
+                f"Prompt '{prompt_name}' does not have any LLM kwargs associated with it."
+            )
+
+        prompt_blueprint_metadata = prompt_blueprint.get("metadata")
+
+        if not prompt_blueprint_metadata:
+            raise ValueError(
+                f"Prompt '{prompt_name}' does not have any metadata associated with it."
+            )
+
+        prompt_blueprint_model = prompt_blueprint_metadata.get("model")
+
+        if not prompt_blueprint_model:
+            raise ValueError(
+                f"Prompt '{prompt_name}' does not have a model parameters associated with it."
+            )
+
+        return prompt_blueprint_model
+
+    @staticmethod
+    def _prepare_track_request_kwargs(
+        api_key,
+        request_params,
+        tags,
+        input_variables,
+        group_id,
+        pl_run_span_id: Union[str, None] = None,
+        metadata: Union[Dict[str, str], None] = None,
+        **body,
+    ):
+        return {
+            "function_name": request_params["function_name"],
+            "provider_type": request_params["provider"],
+            "args": [],
+            "kwargs": request_params["kwargs"],
+            "tags": tags,
+            "request_start_time": datetime.datetime.now(
+                datetime.timezone.utc
+            ).timestamp(),
+            "request_end_time": datetime.datetime.now(
+                datetime.timezone.utc
+            ).timestamp(),
+            "api_key": api_key,
+            "metadata": metadata,
+            "prompt_id": request_params["prompt_blueprint"]["id"],
+            "prompt_version": request_params["prompt_blueprint"]["version"],
+            "prompt_input_variables": input_variables,
+            "group_id": group_id,
+            "return_prompt_blueprint": True,
+            "span_id": pl_run_span_id,
+            **body,
+        }
+
+    def traceable(self, attributes=None, name=None):
+        def decorator(func):
+            @wraps(func)
+            def sync_wrapper(*args, **kwargs):
+                if self.tracer:
+                    span_name = name or func.__name__
+                    with self.tracer.start_as_current_span(span_name) as span:
+                        if attributes:
+                            for key, value in attributes.items():
+                                span.set_attribute(key, value)
+
+                        span.set_attribute(
+                            "function_input", str({"args": args, "kwargs": kwargs})
+                        )
+                        result = func(*args, **kwargs)
+                        span.set_attribute("function_output", str(result))
+
+                        return result
+                else:
+                    return func(*args, **kwargs)
+
+            @wraps(func)
+            async def async_wrapper(*args, **kwargs):
+                if self.tracer:
+                    span_name = name or func.__name__
+                    with self.tracer.start_as_current_span(span_name) as span:
+                        if attributes:
+                            for key, value in attributes.items():
+                                span.set_attribute(key, value)
+
+                        span.set_attribute(
+                            "function_input", str({"args": args, "kwargs": kwargs})
+                        )
+                        result = await func(*args, **kwargs)
+                        span.set_attribute("function_output", str(result))
+
+                        return result
+                else:
+                    return await func(*args, **kwargs)
+
+            return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
+
+        return decorator
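Because traceable() now lives on the mixin, the same decorator is available from both PromptLayer and AsyncPromptLayer for wrapping arbitrary sync or async functions in spans whenever tracing is enabled. A small sketch; the function and attributes are invented, and it assumes the synchronous constructor accepts the same enable_tracing flag the async one now takes:

    from promptlayer.promptlayer import PromptLayer

    pl = PromptLayer(enable_tracing=True)  # assumption: sync client exposes enable_tracing too

    @pl.traceable(name="summarize_ticket", attributes={"team": "support"})
    def summarize_ticket(ticket_text: str) -> str:
        # function_input and function_output are attached to the span automatically
        return ticket_text[:100]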
promptlayer/types/prompt_template.py
CHANGED
@@ -28,7 +28,23 @@ class ImageContent(TypedDict, total=False):
     image_url: ImageUrl


-Content = Union[TextContent, ImageContent]
+class Media(TypedDict, total=False):
+    title: str
+    type: str
+    url: str
+
+
+class MediaContnt(TypedDict, total=False):
+    type: Literal["media"]
+    media: Media
+
+
+class MediaVariable(TypedDict, total=False):
+    type: Literal["media_variable"]
+    name: str
+
+
+Content = Union[TextContent, ImageContent, MediaContnt, MediaVariable]


 class Function(TypedDict, total=False):
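The new TypedDicts widen the Content union so template messages can carry inline media or a named media placeholder (the MediaContnt spelling is the package's own). A sketch of the dict shapes these types describe; the values are illustrative, and how templates combine them with the pre-existing text and image parts is an assumption:

    media_part = {
        "type": "media",
        "media": {
            "title": "architecture diagram",
            "type": "image/png",
            "url": "https://example.com/diagram.png",
        },
    }
    media_variable_part = {"type": "media_variable", "name": "attachment"}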
promptlayer/utils.py
CHANGED
@@ -2,13 +2,24 @@ import asyncio
 import contextvars
 import datetime
 import functools
+import inspect
 import json
 import os
 import sys
 import types
 from copy import deepcopy
 from enum import Enum
-from typing import
+from typing import (
+    Any,
+    AsyncGenerator,
+    AsyncIterable,
+    Callable,
+    Dict,
+    Generator,
+    List,
+    Optional,
+    Union,
+)

 import httpx
 import requests
@@ -311,6 +322,27 @@ def track_request(**body):
         return {}


+async def atrack_request(**body: Any) -> Dict[str, Any]:
+    try:
+        async with httpx.AsyncClient() as client:
+            response = await client.post(
+                f"{URL_API_PROMPTLAYER}/track-request",
+                json=body,
+            )
+            if response.status_code != 200:
+                warn_on_bad_response(
+                    response,
+                    f"PromptLayer had the following error while tracking your request: {response.text}",
+                )
+            return response.json()
+    except httpx.RequestError as e:
+        print(
+            f"WARNING: While logging your request PromptLayer had the following error: {e}",
+            file=sys.stderr,
+        )
+        return {}
+
+
 def promptlayer_api_request_async(
     function_name,
     provider_type,
@@ -1136,6 +1168,81 @@ def openai_stream_chat(results: list):
     return response


+async def aopenai_stream_chat(generator: AsyncIterable[Any]) -> Any:
+    from openai.types.chat import (
+        ChatCompletion,
+        ChatCompletionChunk,
+        ChatCompletionMessage,
+        ChatCompletionMessageToolCall,
+    )
+    from openai.types.chat.chat_completion import Choice
+    from openai.types.chat.chat_completion_message_tool_call import Function
+
+    chat_completion_chunks: List[ChatCompletionChunk] = []
+    response: ChatCompletion = ChatCompletion(
+        id="",
+        object="chat.completion",
+        choices=[
+            Choice(
+                finish_reason="stop",
+                index=0,
+                message=ChatCompletionMessage(role="assistant"),
+            )
+        ],
+        created=0,
+        model="",
+    )
+    content = ""
+    tool_calls: Union[List[ChatCompletionMessageToolCall], None] = None
+
+    async for result in generator:
+        chat_completion_chunks.append(result)
+        choices = result.choices
+        if len(choices) == 0:
+            continue
+        if choices[0].delta.content:
+            content = f"{content}{choices[0].delta.content}"
+
+        delta = choices[0].delta
+        if delta.tool_calls:
+            tool_calls = tool_calls or []
+            last_tool_call = None
+            if len(tool_calls) > 0:
+                last_tool_call = tool_calls[-1]
+            tool_call = delta.tool_calls[0]
+            if not tool_call.function:
+                continue
+            if not last_tool_call or tool_call.id:
+                tool_calls.append(
+                    ChatCompletionMessageToolCall(
+                        id=tool_call.id or "",
+                        function=Function(
+                            name=tool_call.function.name or "",
+                            arguments=tool_call.function.arguments or "",
+                        ),
+                        type=tool_call.type or "function",
+                    )
+                )
+                continue
+            last_tool_call.function.name = (
+                f"{last_tool_call.function.name}{tool_call.function.name or ''}"
+            )
+            last_tool_call.function.arguments = f"{last_tool_call.function.arguments}{tool_call.function.arguments or ''}"
+
+    # After collecting all chunks, set the response attributes
+    if chat_completion_chunks:
+        last_result = chat_completion_chunks[-1]
+        response.id = last_result.id
+        response.created = last_result.created
+        response.model = last_result.model
+        response.system_fingerprint = getattr(last_result, "system_fingerprint", None)
+        response.usage = last_result.usage
+
+    response.choices[0].message.content = content
+    response.choices[0].message.tool_calls = tool_calls
+    return response
+
+
 def openai_stream_completion(results: list):
     from openai.types.completion import Completion, CompletionChoice

@@ -1162,6 +1269,41 @@ def openai_stream_completion(results: list):
     return response


+async def aopenai_stream_completion(generator: AsyncIterable[Any]) -> Any:
+    from openai.types.completion import Completion, CompletionChoice
+
+    completions: List[Completion] = []
+    text = ""
+    response = Completion(
+        id="",
+        created=0,
+        model="",
+        object="text_completion",
+        choices=[CompletionChoice(finish_reason="stop", index=0, text="")],
+    )
+
+    async for completion in generator:
+        completions.append(completion)
+        usage = completion.usage
+        system_fingerprint = getattr(completion, "system_fingerprint", None)
+        if len(completion.choices) > 0 and completion.choices[0].text:
+            text = f"{text}{completion.choices[0].text}"
+        if usage:
+            response.usage = usage
+        if system_fingerprint:
+            response.system_fingerprint = system_fingerprint
+
+    # After collecting all completions, set the response attributes
+    if completions:
+        last_chunk = completions[-1]
+        response.id = last_chunk.id
+        response.created = last_chunk.created
+        response.model = last_chunk.model
+
+    response.choices[0].text = text
+    return response
+
+
 def anthropic_stream_message(results: list):
     from anthropic.types import Message, MessageStreamEvent, TextBlock, Usage

@@ -1192,6 +1334,39 @@ def anthropic_stream_message(results: list):
     return response


+async def aanthropic_stream_message(generator: AsyncIterable[Any]) -> Any:
+    from anthropic.types import Message, MessageStreamEvent, TextBlock, Usage
+
+    message_stream_events: List[MessageStreamEvent] = []
+    response: Message = Message(
+        id="",
+        model="",
+        content=[],
+        role="assistant",
+        type="message",
+        stop_reason="stop_sequence",
+        stop_sequence=None,
+        usage=Usage(input_tokens=0, output_tokens=0),
+    )
+    content = ""
+
+    async for result in generator:
+        message_stream_events.append(result)
+        if result.type == "message_start":
+            response = result.message
+        elif result.type == "content_block_delta":
+            if result.delta.type == "text_delta":
+                content = f"{content}{result.delta.text}"
+        elif result.type == "message_delta":
+            if hasattr(result, "usage"):
+                response.usage.output_tokens = result.usage.output_tokens
+            if hasattr(result.delta, "stop_reason"):
+                response.stop_reason = result.delta.stop_reason
+
+    response.content.append(TextBlock(type="text", text=content))
+    return response
+
+
 def anthropic_stream_completion(results: list):
     from anthropic.types import Completion

@@ -1212,6 +1387,33 @@ def anthropic_stream_completion(results: list):
     return response


+async def aanthropic_stream_completion(generator: AsyncIterable[Any]) -> Any:
+    from anthropic.types import Completion
+
+    completions: List[Completion] = []
+    text = ""
+    response = Completion(
+        id="",
+        completion="",
+        model="",
+        stop_reason="stop",
+        type="completion",
+    )
+
+    async for completion in generator:
+        completions.append(completion)
+        text = f"{text}{completion.completion}"
+
+    # After collecting all completions, set the response attributes
+    if completions:
+        last_chunk = completions[-1]
+        response.id = last_chunk.id
+        response.model = last_chunk.model
+
+    response.completion = text
+    return response
+
+
 def stream_response(
     generator: Generator, after_stream: Callable, map_results: Callable
 ):
@@ -1232,6 +1434,33 @@ def stream_response(
     yield data


+async def astream_response(
+    generator: AsyncIterable[Any],
+    after_stream: Callable[..., Any],
+    map_results: Callable[[Any], Any],
+) -> AsyncGenerator[Dict[str, Any], None]:
+    data = {
+        "request_id": None,
+        "raw_response": None,
+        "prompt_blueprint": None,
+    }
+    results = []
+    async for result in generator:
+        results.append(result)
+        data["raw_response"] = result
+        yield data
+    request_response = await map_results(results)
+    if inspect.iscoroutinefunction(after_stream):
+        # after_stream is an async function
+        response = await after_stream(request_response=request_response.model_dump())
+    else:
+        # after_stream is synchronous
+        response = after_stream(request_response=request_response.model_dump())
+    data["request_id"] = response.get("request_id")
+    data["prompt_blueprint"] = response.get("prompt_blueprint")
+    yield data
+
+
 def openai_chat_request(client, **kwargs):
     return client.chat.completions.create(**kwargs)

@@ -1256,6 +1485,30 @@ def openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
     return request_to_make(client, **kwargs)


+async def aopenai_chat_request(client, **kwargs):
+    return await client.chat.completions.create(**kwargs)
+
+
+async def aopenai_completions_request(client, **kwargs):
+    return await client.completions.create(**kwargs)
+
+
+AMAP_TYPE_TO_OPENAI_FUNCTION = {
+    "chat": aopenai_chat_request,
+    "completion": aopenai_completions_request,
+}
+
+
+async def aopenai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+    from openai import AsyncOpenAI
+
+    client = AsyncOpenAI(base_url=kwargs.pop("base_url", None))
+    request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[
+        prompt_blueprint["prompt_template"]["type"]
+    ]
+    return await request_to_make(client, **kwargs)
+
+
 def azure_openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
     from openai import AzureOpenAI

@@ -1266,6 +1519,16 @@ def azure_openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
     return request_to_make(client, **kwargs)


+async def aazure_openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+    from openai import AsyncAzureOpenAI
+
+    client = AsyncAzureOpenAI(azure_endpoint=kwargs.pop("base_url", None))
+    request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[
+        prompt_blueprint["prompt_template"]["type"]
+    ]
+    return await request_to_make(client, **kwargs)
+
+
 def anthropic_chat_request(client, **kwargs):
     return client.messages.create(**kwargs)

@@ -1290,6 +1553,30 @@ def anthropic_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
     return request_to_make(client, **kwargs)


+async def aanthropic_chat_request(client, **kwargs):
+    return await client.messages.create(**kwargs)
+
+
+async def aanthropic_completions_request(client, **kwargs):
+    return await client.completions.create(**kwargs)
+
+
+AMAP_TYPE_TO_ANTHROPIC_FUNCTION = {
+    "chat": aanthropic_chat_request,
+    "completion": aanthropic_completions_request,
+}
+
+
+async def aanthropic_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+    from anthropic import AsyncAnthropic
+
+    client = AsyncAnthropic(base_url=kwargs.pop("base_url", None))
+    request_to_make = AMAP_TYPE_TO_ANTHROPIC_FUNCTION[
+        prompt_blueprint["prompt_template"]["type"]
+    ]
+    return await request_to_make(client, **kwargs)
+
+
 # do not remove! This is used in the langchain integration.
 def get_api_key():
     # raise an error if the api key is not set
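astream_response mirrors the synchronous stream_response: it re-yields every raw chunk as it arrives, then aggregates the chunks with map_results, reports the aggregate through after_stream (sync or async), and yields one final item carrying request_id and prompt_blueprint. A self-contained toy illustration of that contract; every name except astream_response is invented for the demo:

    import asyncio
    from promptlayer.utils import astream_response

    class FakeFinal:
        def __init__(self, text):
            self.text = text

        def model_dump(self):
            # astream_response calls .model_dump() on whatever map_results returns
            return {"text": self.text}

    async def fake_chunks():
        for piece in ("Hel", "lo"):
            yield piece

    async def aggregate(chunks):  # stand-in for a stream aggregator such as aopenai_stream_chat
        return FakeFinal("".join(chunks))

    async def record(request_response):  # stand-in for the atrack_request callback
        return {"request_id": 42, "prompt_blueprint": None}

    async def main():
        async for data in astream_response(fake_chunks(), record, aggregate):
            # request_id stays None until the final yield
            print(data["raw_response"], data["request_id"])

    asyncio.run(main())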
{promptlayer-1.0.30.dist-info → promptlayer-1.0.32.dist-info}/RECORD
CHANGED
@@ -1,17 +1,18 @@
-promptlayer/__init__.py,sha256=
+promptlayer/__init__.py,sha256=ZWwec3lasIs6PTQi77sYwX746yWKAQc2p8Lg8fLZrO4,140
 promptlayer/groups/__init__.py,sha256=xhOAolLUBkr76ZHvJr29OwjCIk1V9qKQXjZCuyTJUIY,429
 promptlayer/groups/groups.py,sha256=YPROicy-TzpkrpA8vOpZS2lwvJ6VRtlbQ1S2oT1N0vM,338
-promptlayer/promptlayer.py,sha256=
+promptlayer/promptlayer.py,sha256=ZzHLMwbF-qq9SXNb610tmAOnGzOEvugGjHp3MtGWMGA,20083
 promptlayer/promptlayer_base.py,sha256=sev-EZehRXJSZSmJtMkqmAUK1345pqbDY_lNjPP5MYA,7158
+promptlayer/promptlayer_mixins.py,sha256=CZRX-kjd067JLzxd0qOWBnOCHdC5CZ4bXFDs7CDMplg,9572
 promptlayer/span_exporter.py,sha256=zIJNsb3Fe6yb5wKLDmkoPF2wqFjk1p39E0jWHD2plzI,2658
 promptlayer/templates.py,sha256=bdX8ZxydWwF9QMF1UBD-qoYqYRPrUSTAt88r2D8ws7c,1193
 promptlayer/track/__init__.py,sha256=8J258daTXb_P8eHRbYR2Au1lJzTh_92UkOHf7q0NpKs,1757
 promptlayer/track/track.py,sha256=UdkCxhWUvhvPdhsoHj4qmeiRq6xLcWmeIdYXrgZph04,3252
 promptlayer/types/__init__.py,sha256=xJcvQuOk91ZBBePb40-1FDNDKYrZoH5lPE2q6_UhprM,111
-promptlayer/types/prompt_template.py,sha256=
+promptlayer/types/prompt_template.py,sha256=sv5iMV8Iix2LCMUSMTkfb8PohfKp-vR9hRMck8OXSVc,4728
 promptlayer/types/request_log.py,sha256=xU6bcxQar6GaBOJlgZTavXUV3FjE8sF_nSjPu4Ya_00,174
-promptlayer/utils.py,sha256=
-promptlayer-1.0.
-promptlayer-1.0.
-promptlayer-1.0.
-promptlayer-1.0.
+promptlayer/utils.py,sha256=yPEzt3JZT7rbVch39Gv8Uk0-CR8Ih5Ym9iIfS0VEEH8,53960
+promptlayer-1.0.32.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+promptlayer-1.0.32.dist-info/METADATA,sha256=5TUZRxITDb2RVq9j9xdHlK8BHTFzp6_HNJjPhK3uKk8,4824
+promptlayer-1.0.32.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+promptlayer-1.0.32.dist-info/RECORD,,
{promptlayer-1.0.30.dist-info → promptlayer-1.0.32.dist-info}/LICENSE
File without changes
{promptlayer-1.0.30.dist-info → promptlayer-1.0.32.dist-info}/WHEEL
File without changes