promptlayer 1.0.37__tar.gz → 1.0.38__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of promptlayer might be problematic.
- {promptlayer-1.0.37 → promptlayer-1.0.38}/PKG-INFO +8 -8
- {promptlayer-1.0.37 → promptlayer-1.0.38}/README.md +7 -7
- {promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/__init__.py +1 -1
- {promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/promptlayer.py +9 -29
- {promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/promptlayer_base.py +12 -31
- {promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/promptlayer_mixins.py +10 -30
- {promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/span_exporter.py +3 -10
- {promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/templates.py +1 -3
- {promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/track/__init__.py +14 -10
- {promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/track/track.py +4 -12
- {promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/utils.py +51 -120
- {promptlayer-1.0.37 → promptlayer-1.0.38}/pyproject.toml +20 -1
- {promptlayer-1.0.37 → promptlayer-1.0.38}/LICENSE +0 -0
- {promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/groups/__init__.py +0 -0
- {promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/groups/groups.py +0 -0
- {promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/types/__init__.py +0 -0
- {promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/types/prompt_template.py +0 -0
- {promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/types/request_log.py +0 -0
{promptlayer-1.0.37 → promptlayer-1.0.38}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: promptlayer
-Version: 1.0.37
+Version: 1.0.38
 Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
 License: Apache-2.0
 Author: Magniv
@@ -32,11 +32,11 @@ Description-Content-Type: text/markdown
 <a href="https://docs.promptlayer.com"><img alt="Docs" src="https://custom-icon-badges.herokuapp.com/badge/docs-PL-green.svg?logo=cake&style=for-the-badge"></a>
 <a href="https://www.loom.com/share/196c42e43acd4a369d75e9a7374a0850"><img alt="Demo with Loom" src="https://img.shields.io/badge/Demo-loom-552586.svg?logo=loom&style=for-the-badge&labelColor=gray"></a>
 
----
+---
 
 <div align="left">
 
-[PromptLayer](https://promptlayer.com/) is the first platform that allows you to track, manage, and share your GPT prompt engineering. PromptLayer acts a middleware between your code and OpenAI’s python library.
+[PromptLayer](https://promptlayer.com/) is the first platform that allows you to track, manage, and share your GPT prompt engineering. PromptLayer acts a middleware between your code and OpenAI’s python library.
 
 PromptLayer records all your OpenAI API requests, allowing you to search and explore request history in the PromptLayer dashboard.
 
@@ -77,14 +77,14 @@ openai = promptlayer.openai
 
 ### Adding PromptLayer tags: `pl_tags`
 
-PromptLayer allows you to add tags through the `pl_tags` argument. This allows you to track and group requests in the dashboard.
+PromptLayer allows you to add tags through the `pl_tags` argument. This allows you to track and group requests in the dashboard.
 
 *Tags are not required but we recommend them!*
 
 ```python
 openai.Completion.create(
-  engine="text-ada-001",
-  prompt="My name is",
+  engine="text-ada-001",
+  prompt="My name is",
   pl_tags=["name-guessing", "pipeline-2"]
 )
 ```
@@ -93,11 +93,11 @@ After making your first few requests, you should be able to see them in the Prom
 
 ## Using the REST API
 
-This Python library is a wrapper over PromptLayer's REST API. If you use another language, like Javascript, just interact directly with the API.
+This Python library is a wrapper over PromptLayer's REST API. If you use another language, like Javascript, just interact directly with the API.
 
 Here is an example request below:
 
-```
+```python
 import requests
 request_response = requests.post(
   "https://api.promptlayer.com/track-request",
{promptlayer-1.0.37 → promptlayer-1.0.38}/README.md

@@ -8,11 +8,11 @@
 <a href="https://docs.promptlayer.com"><img alt="Docs" src="https://custom-icon-badges.herokuapp.com/badge/docs-PL-green.svg?logo=cake&style=for-the-badge"></a>
 <a href="https://www.loom.com/share/196c42e43acd4a369d75e9a7374a0850"><img alt="Demo with Loom" src="https://img.shields.io/badge/Demo-loom-552586.svg?logo=loom&style=for-the-badge&labelColor=gray"></a>
 
----
+---
 
 <div align="left">
 
-[PromptLayer](https://promptlayer.com/) is the first platform that allows you to track, manage, and share your GPT prompt engineering. PromptLayer acts a middleware between your code and OpenAI’s python library.
+[PromptLayer](https://promptlayer.com/) is the first platform that allows you to track, manage, and share your GPT prompt engineering. PromptLayer acts a middleware between your code and OpenAI’s python library.
 
 PromptLayer records all your OpenAI API requests, allowing you to search and explore request history in the PromptLayer dashboard.
 
@@ -53,14 +53,14 @@ openai = promptlayer.openai
 
 ### Adding PromptLayer tags: `pl_tags`
 
-PromptLayer allows you to add tags through the `pl_tags` argument. This allows you to track and group requests in the dashboard.
+PromptLayer allows you to add tags through the `pl_tags` argument. This allows you to track and group requests in the dashboard.
 
 *Tags are not required but we recommend them!*
 
 ```python
 openai.Completion.create(
-  engine="text-ada-001",
-  prompt="My name is",
+  engine="text-ada-001",
+  prompt="My name is",
   pl_tags=["name-guessing", "pipeline-2"]
 )
 ```
@@ -69,11 +69,11 @@ After making your first few requests, you should be able to see them in the Prom
 
 ## Using the REST API
 
-This Python library is a wrapper over PromptLayer's REST API. If you use another language, like Javascript, just interact directly with the API.
+This Python library is a wrapper over PromptLayer's REST API. If you use another language, like Javascript, just interact directly with the API.
 
 Here is an example request below:
 
-```
+```python
 import requests
 request_response = requests.post(
   "https://api.promptlayer.com/track-request",
{promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/promptlayer.py

@@ -61,9 +61,7 @@ class PromptLayer(PromptLayerMixin):
         self.api_key = api_key
         self.templates = TemplateManager(api_key)
         self.group = GroupManager(api_key)
-        self.tracer_provider, self.tracer = self._initialize_tracer(
-            api_key, enable_tracing
-        )
+        self.tracer_provider, self.tracer = self._initialize_tracer(api_key, enable_tracing)
         self.track = TrackManager(api_key)
 
     def __getattr__(
@@ -233,9 +231,7 @@ class PromptLayer(PromptLayerMixin):
                 span.set_attribute("prompt_name", prompt_name)
                 span.set_attribute("function_input", str(_run_internal_kwargs))
                 pl_run_span_id = hex(span.context.span_id)[2:].zfill(16)
-                result = self._run_internal(
-                    **_run_internal_kwargs, pl_run_span_id=pl_run_span_id
-                )
+                result = self._run_internal(**_run_internal_kwargs, pl_run_span_id=pl_run_span_id)
                 span.set_attribute("function_output", str(result))
                 return result
             else:
@@ -285,18 +281,12 @@ class PromptLayer(PromptLayerMixin):
 
         if not return_all_outputs:
             if is_workflow_results_dict(results):
-                output_nodes = [
-                    node_data
-                    for node_data in results.values()
-                    if node_data.get("is_output_node")
-                ]
+                output_nodes = [node_data for node_data in results.values() if node_data.get("is_output_node")]
 
                 if not output_nodes:
                     raise Exception(json.dumps(results, indent=4))
 
-                if not any(
-                    node.get("status") == "SUCCESS" for node in output_nodes
-                ):
+                if not any(node.get("status") == "SUCCESS" for node in output_nodes):
                     raise Exception(json.dumps(results, indent=4))
 
         return results
@@ -364,14 +354,10 @@ class AsyncPromptLayer(PromptLayerMixin):
         self.api_key = api_key
         self.templates = AsyncTemplateManager(api_key)
         self.group = AsyncGroupManager(api_key)
-        self.tracer_provider, self.tracer = self._initialize_tracer(
-            api_key, enable_tracing
-        )
+        self.tracer_provider, self.tracer = self._initialize_tracer(api_key, enable_tracing)
         self.track = AsyncTrackManager(api_key)
 
-    def __getattr__(
-        self, name: Union[Literal["openai"], Literal["anthropic"], Literal["prompts"]]
-    ):
+    def __getattr__(self, name: Union[Literal["openai"], Literal["anthropic"], Literal["prompts"]]):
         if name == "openai":
             import openai as openai_module
 
@@ -400,9 +386,7 @@ class AsyncPromptLayer(PromptLayerMixin):
         input_variables: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, str]] = None,
         workflow_label_name: Optional[str] = None,
-        workflow_version: Optional[
-            int
-        ] = None,  # This is the version number, not the version ID
+        workflow_version: Optional[int] = None,  # This is the version number, not the version ID
         return_all_outputs: Optional[bool] = False,
     ) -> Dict[str, Any]:
         try:
@@ -448,9 +432,7 @@ class AsyncPromptLayer(PromptLayerMixin):
                 span.set_attribute("prompt_name", prompt_name)
                 span.set_attribute("function_input", str(_run_internal_kwargs))
                 pl_run_span_id = hex(span.context.span_id)[2:].zfill(16)
-                result = await self._run_internal(
-                    **_run_internal_kwargs, pl_run_span_id=pl_run_span_id
-                )
+                result = await self._run_internal(**_run_internal_kwargs, pl_run_span_id=pl_run_span_id)
                 span.set_attribute("function_output", str(result))
                 return result
             else:
@@ -563,9 +545,7 @@ class AsyncPromptLayer(PromptLayerMixin):
             input_variables=input_variables,
             metadata=metadata,
         )
-        prompt_blueprint = await self.templates.get(
-            prompt_name, get_prompt_template_params
-        )
+        prompt_blueprint = await self.templates.get(prompt_name, get_prompt_template_params)
         prompt_blueprint_model = self._validate_and_extract_model_from_prompt_blueprint(
             prompt_blueprint=prompt_blueprint, prompt_name=prompt_name
         )
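Most of the promptlayer.py hunks above only re-wrap the tracing code around `run()`. For orientation, here is a minimal sketch of that pattern, assuming the OpenTelemetry SDK is installed; `_run_internal`, the span name, and the tracer name are placeholders rather than the library's actual internals:

```python
# A minimal sketch (not the library's exact code): wrap a prompt run in an
# OpenTelemetry span, record inputs/outputs as attributes, and hand the
# 16-character hex span id to the internal runner.
from opentelemetry.sdk.trace import TracerProvider

tracer = TracerProvider().get_tracer("prompt-layer-library")  # tracer name is illustrative


def _run_internal(pl_run_span_id, **kwargs):
    # Hypothetical stand-in for the library's internal runner.
    return {"span": pl_run_span_id, "kwargs": kwargs}


def run(prompt_name, **kwargs):
    with tracer.start_as_current_span("PromptLayer Run") as span:
        span.set_attribute("prompt_name", prompt_name)
        span.set_attribute("function_input", str(kwargs))
        # Span ids are 64-bit integers; serialize as a zero-padded 16-char hex string.
        pl_run_span_id = hex(span.get_span_context().span_id)[2:].zfill(16)
        result = _run_internal(pl_run_span_id=pl_run_span_id, **kwargs)
        span.set_attribute("function_output", str(result))
        return result
```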
{promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/promptlayer_base.py

@@ -15,9 +15,7 @@ class PromptLayerBase(object):
         "_tracer",
     ]
 
-    def __init__(
-        self, obj, function_name="", provider_type="openai", api_key=None, tracer=None
-    ):
+    def __init__(self, obj, function_name="", provider_type="openai", api_key=None, tracer=None):
         object.__setattr__(self, "_obj", obj)
         object.__setattr__(self, "_function_name", function_name)
         object.__setattr__(self, "_provider_type", provider_type)
@@ -29,29 +27,22 @@ class PromptLayerBase(object):
 
         if (
             name != "count_tokens"  # fix for anthropic count_tokens
-            and not re.match(
-                r"<class 'anthropic\..*Error'>", str(attr)
-            )  # fix for anthropic errors
-            and not re.match(
-                r"<class 'openai\..*Error'>", str(attr)
-            )  # fix for openai errors
+            and not re.match(r"<class 'anthropic\..*Error'>", str(attr))  # fix for anthropic errors
+            and not re.match(r"<class 'openai\..*Error'>", str(attr))  # fix for openai errors
             and (
                 inspect.isclass(attr)
                 or inspect.isfunction(attr)
                 or inspect.ismethod(attr)
-                or str(type(attr))
-                == "<class 'anthropic.resources.completions.Completions'>"
-                or str(type(attr))
-                == "<class 'anthropic.resources.completions.AsyncCompletions'>"
+                or str(type(attr)) == "<class 'anthropic.resources.completions.Completions'>"
+                or str(type(attr)) == "<class 'anthropic.resources.completions.AsyncCompletions'>"
                 or str(type(attr)) == "<class 'anthropic.resources.messages.Messages'>"
-                or str(type(attr))
-                == "<class 'anthropic.resources.messages.AsyncMessages'>"
+                or str(type(attr)) == "<class 'anthropic.resources.messages.AsyncMessages'>"
                 or re.match(r"<class 'openai\.resources.*'>", str(type(attr)))
             )
         ):
             return PromptLayerBase(
                 attr,
-                function_name=f"{object.__getattribute__(self, '_function_name')}.{name}",
+                function_name=f"{object.__getattribute__(self, '_function_name')}.{name}",
                 provider_type=object.__getattribute__(self, "_provider_type"),
                 api_key=object.__getattribute__(self, "_api_key"),
                 tracer=object.__getattribute__(self, "_tracer"),
@@ -77,16 +68,10 @@ class PromptLayerBase(object):
 
         if tracer:
             with tracer.start_as_current_span(function_name) as llm_request_span:
-                llm_request_span_id = hex(llm_request_span.context.span_id)[2:].zfill(
-                    16
-                )
-                llm_request_span.set_attribute(
-                    "provider", object.__getattribute__(self, "_provider_type")
-                )
+                llm_request_span_id = hex(llm_request_span.context.span_id)[2:].zfill(16)
+                llm_request_span.set_attribute("provider", object.__getattribute__(self, "_provider_type"))
                 llm_request_span.set_attribute("function_name", function_name)
-                llm_request_span.set_attribute(
-                    "function_input", str({"args": args, "kwargs": kwargs})
-                )
+                llm_request_span.set_attribute("function_input", str({"args": args, "kwargs": kwargs}))
 
                 if inspect.isclass(function_object):
                     result = PromptLayerBase(
@@ -101,9 +86,7 @@ class PromptLayerBase(object):
 
                 function_response = function_object(*args, **kwargs)
 
-                if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(
-                    function_response
-                ):
+                if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(function_response):
                     return async_wrapper(
                         function_response,
                         return_pl_id,
@@ -146,9 +129,7 @@ class PromptLayerBase(object):
 
         function_response = function_object(*args, **kwargs)
 
-        if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(
-            function_response
-        ):
+        if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(function_response):
             return async_wrapper(
                 function_response,
                 return_pl_id,
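`PromptLayerBase` is essentially an attribute proxy around the provider SDK object, and the hunks above only re-wrap its conditions. A rough, simplified sketch of that proxy idea (illustrative names, not the library's implementation):

```python
# Simplified attribute-proxy sketch: wrap an object, return wrapped attributes,
# and intercept calls so they can be logged before the result is passed back.
class LoggingProxy:
    def __init__(self, obj, function_name=""):
        object.__setattr__(self, "_obj", obj)
        object.__setattr__(self, "_function_name", function_name)

    def __getattr__(self, name):
        attr = getattr(object.__getattribute__(self, "_obj"), name)
        path = f"{object.__getattribute__(self, '_function_name')}.{name}".lstrip(".")
        if callable(attr):
            # Keep wrapping so nested attributes (client.chat.completions...) stay traced.
            return LoggingProxy(attr, function_name=path)
        return attr

    def __call__(self, *args, **kwargs):
        result = object.__getattribute__(self, "_obj")(*args, **kwargs)
        # The real class records a span here instead of printing.
        print(object.__getattribute__(self, "_function_name"), "->", type(result).__name__)
        return result
```

Under this sketch, wrapping a module and calling `LoggingProxy(math).sqrt(4)` would log the attribute path and return `2.0`; the real class additionally filters which attribute types get wrapped and forwards provider and API-key state.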
{promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/promptlayer_mixins.py

@@ -138,9 +138,7 @@ class PromptLayerMixin:
     @staticmethod
     def _initialize_tracer(api_key: str = None, enable_tracing: bool = False):
         if enable_tracing:
-            resource = Resource(
-                attributes={ResourceAttributes.SERVICE_NAME: "prompt-layer-library"}
-            )
+            resource = Resource(attributes={ResourceAttributes.SERVICE_NAME: "prompt-layer-library"})
             tracer_provider = TracerProvider(resource=resource)
             promptlayer_exporter = PromptLayerSpanExporter(api_key=api_key)
             span_processor = BatchSpanProcessor(promptlayer_exporter)
@@ -151,9 +149,7 @@ class PromptLayerMixin:
         return None, None
 
     @staticmethod
-    def _prepare_get_prompt_template_params(
-        *, prompt_version, prompt_release_label, input_variables, metadata
-    ):
+    def _prepare_get_prompt_template_params(*, prompt_version, prompt_release_label, input_variables, metadata):
         params = {}
 
         if prompt_version:
@@ -206,27 +202,19 @@ class PromptLayerMixin:
         }
 
     @staticmethod
-    def _validate_and_extract_model_from_prompt_blueprint(
-        *, prompt_blueprint, prompt_name
-    ):
+    def _validate_and_extract_model_from_prompt_blueprint(*, prompt_blueprint, prompt_name):
         if not prompt_blueprint["llm_kwargs"]:
-            raise ValueError(
-                f"Prompt '{prompt_name}' does not have any LLM kwargs associated with it."
-            )
+            raise ValueError(f"Prompt '{prompt_name}' does not have any LLM kwargs associated with it.")
 
         prompt_blueprint_metadata = prompt_blueprint.get("metadata")
 
         if not prompt_blueprint_metadata:
-            raise ValueError(
-                f"Prompt '{prompt_name}' does not have any metadata associated with it."
-            )
+            raise ValueError(f"Prompt '{prompt_name}' does not have any metadata associated with it.")
 
         prompt_blueprint_model = prompt_blueprint_metadata.get("model")
 
         if not prompt_blueprint_model:
-            raise ValueError(
-                f"Prompt '{prompt_name}' does not have a model parameters associated with it."
-            )
+            raise ValueError(f"Prompt '{prompt_name}' does not have a model parameters associated with it.")
 
         return prompt_blueprint_model
 
@@ -247,12 +235,8 @@ class PromptLayerMixin:
             "args": [],
             "kwargs": request_params["kwargs"],
             "tags": tags,
-            "request_start_time": datetime.datetime.now(
-                datetime.timezone.utc
-            ).timestamp(),
-            "request_end_time": datetime.datetime.now(
-                datetime.timezone.utc
-            ).timestamp(),
+            "request_start_time": datetime.datetime.now(datetime.timezone.utc).timestamp(),
+            "request_end_time": datetime.datetime.now(datetime.timezone.utc).timestamp(),
             "api_key": api_key,
             "metadata": metadata,
             "prompt_id": request_params["prompt_blueprint"]["id"],
@@ -275,9 +259,7 @@ class PromptLayerMixin:
             for key, value in attributes.items():
                 span.set_attribute(key, value)
 
-            span.set_attribute(
-                "function_input", str({"args": args, "kwargs": kwargs})
-            )
+            span.set_attribute("function_input", str({"args": args, "kwargs": kwargs}))
             result = func(*args, **kwargs)
             span.set_attribute("function_output", str(result))
 
@@ -294,9 +276,7 @@ class PromptLayerMixin:
             for key, value in attributes.items():
                 span.set_attribute(key, value)
 
-            span.set_attribute(
-                "function_input", str({"args": args, "kwargs": kwargs})
-            )
+            span.set_attribute("function_input", str({"args": args, "kwargs": kwargs}))
             result = await func(*args, **kwargs)
             span.set_attribute("function_output", str(result))
 
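The `_initialize_tracer` hunk above only joins lines; the setup itself is standard OpenTelemetry SDK wiring. A minimal sketch, with `ConsoleSpanExporter` standing in for `PromptLayerSpanExporter` purely for illustration:

```python
# Standard OpenTelemetry wiring comparable to _initialize_tracer, with
# ConsoleSpanExporter standing in for PromptLayerSpanExporter.
from opentelemetry import trace
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter

resource = Resource(attributes={"service.name": "prompt-layer-library"})
tracer_provider = TracerProvider(resource=resource)
tracer_provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(tracer_provider)
tracer = trace.get_tracer(__name__)  # spans from this tracer are batched to the exporter
```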
{promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/span_exporter.py

@@ -19,12 +19,8 @@ class PromptLayerSpanExporter(SpanExporter):
             span_info = {
                 "name": span.name,
                 "context": {
-                    "trace_id": hex(span.context.trace_id)[2:].zfill(
-                        32
-                    ),  # Ensure 32 characters
-                    "span_id": hex(span.context.span_id)[2:].zfill(
-                        16
-                    ),  # Ensure 16 characters
+                    "trace_id": hex(span.context.trace_id)[2:].zfill(32),  # Ensure 32 characters
+                    "span_id": hex(span.context.span_id)[2:].zfill(16),  # Ensure 16 characters
                     "trace_state": str(span.context.trace_state),
                 },
                 "kind": str(span.kind),
@@ -44,10 +40,7 @@ class PromptLayerSpanExporter(SpanExporter):
                 }
                 for event in span.events
             ],
-            "links": [
-                {"context": link.context, "attributes": dict(link.attributes)}
-                for link in span.links
-            ],
+            "links": [{"context": link.context, "attributes": dict(link.attributes)} for link in span.links],
             "resource": {
                 "attributes": dict(span.resource.attributes),
                 "schema_url": span.resource.schema_url,
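The exporter hunks above just collapse the `zfill` calls onto one line. The underlying detail: W3C trace ids are 128-bit and span ids 64-bit integers, conventionally serialized as 32- and 16-character lowercase hex strings, and `hex()` alone drops leading zeros. A quick illustration with made-up ids:

```python
# Why zfill(32) / zfill(16): hex() drops leading zeros, so small ids would
# otherwise serialize too short. The id values below are invented.
trace_id = 0x00000000000000000000000000ABC123  # 128-bit trace id with leading zeros
span_id = 0x0000000000ABC123                   # 64-bit span id with leading zeros

print(hex(trace_id)[2:])            # 'abc123' (too short on its own)
print(hex(trace_id)[2:].zfill(32))  # '00000000000000000000000000abc123'
print(hex(span_id)[2:].zfill(16))   # '0000000000abc123'
```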
{promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/templates.py

@@ -28,9 +28,7 @@ class AsyncTemplateManager:
     def __init__(self, api_key: str):
         self.api_key = api_key
 
-    async def get(
-        self, prompt_name: str, params: Union[GetPromptTemplate, None] = None
-    ):
+    async def get(self, prompt_name: str, params: Union[GetPromptTemplate, None] = None):
         return await aget_prompt_template(prompt_name, params, self.api_key)
 
     async def all(self, page: int = 1, per_page: int = 30):
{promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/track/__init__.py

@@ -1,7 +1,15 @@
-from promptlayer.track.track import
+from promptlayer.track.track import (
+    agroup,
+    ametadata,
+    aprompt,
+    ascore,
+    group,
+    metadata as metadata_,
+    prompt,
+    score as score_,
+)
+
+# TODO(dmu) LOW: Move this code to another file
 
 
 class TrackManager:
@@ -14,9 +22,7 @@ class TrackManager:
     def metadata(self, request_id, metadata):
         return metadata_(request_id, metadata, self.api_key)
 
-    def prompt(
-        self, request_id, prompt_name, prompt_input_variables, version=None, label=None
-    ):
+    def prompt(self, request_id, prompt_name, prompt_input_variables, version=None, label=None):
         return prompt(
             request_id,
             prompt_name,
@@ -40,9 +46,7 @@ class AsyncTrackManager:
     async def metadata(self, request_id, metadata):
         return await ametadata(request_id, metadata, self.api_key)
 
-    async def prompt(
-        self, request_id, prompt_name, prompt_input_variables, version=None, label=None
-    ):
+    async def prompt(self, request_id, prompt_name, prompt_input_variables, version=None, label=None):
         return await aprompt(
             request_id,
             prompt_name,
{promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/track/track.py

@@ -20,9 +20,7 @@ def prompt(
 ):
     if not isinstance(prompt_input_variables, dict):
         raise Exception("Please provide a dictionary of input variables.")
-    return promptlayer_track_prompt(
-        request_id, prompt_name, prompt_input_variables, api_key, version, label
-    )
+    return promptlayer_track_prompt(request_id, prompt_name, prompt_input_variables, api_key, version, label)
 
 
 def metadata(request_id, metadata, api_key: str = None):
@@ -30,9 +28,7 @@ def metadata(request_id, metadata, api_key: str = None):
         raise Exception("Please provide a dictionary of metadata.")
     for key, value in metadata.items():
         if not isinstance(key, str) or not isinstance(value, str):
-            raise Exception(
-                "Please provide a dictionary of metadata with key value pair of strings."
-            )
+            raise Exception("Please provide a dictionary of metadata with key value pair of strings.")
     return promptlayer_track_metadata(request_id, metadata, api_key)
 
 
@@ -60,9 +56,7 @@ async def aprompt(
 ):
     if not isinstance(prompt_input_variables, dict):
         raise Exception("Please provide a dictionary of input variables.")
-    return await apromptlayer_track_prompt(
-        request_id, prompt_name, prompt_input_variables, api_key, version, label
-    )
+    return await apromptlayer_track_prompt(request_id, prompt_name, prompt_input_variables, api_key, version, label)
 
 
 async def ametadata(request_id, metadata, api_key: str = None):
@@ -70,9 +64,7 @@ async def ametadata(request_id, metadata, api_key: str = None):
         raise Exception("Please provide a dictionary of metadata.")
     for key, value in metadata.items():
         if not isinstance(key, str) or not isinstance(value, str):
-            raise Exception(
-                "Please provide a dictionary of metadata with key-value pairs of strings."
-            )
+            raise Exception("Please provide a dictionary of metadata with key-value pairs of strings.")
     return await apromptlayer_track_metadata(request_id, metadata, api_key)
 
 
{promptlayer-1.0.37 → promptlayer-1.0.38}/promptlayer/utils.py

@@ -35,9 +35,7 @@ from promptlayer.types.prompt_template import (
     PublishPromptTemplateResponse,
 )
 
-URL_API_PROMPTLAYER = os.environ.setdefault(
-    "URL_API_PROMPTLAYER", "https://api.promptlayer.com"
-)
+URL_API_PROMPTLAYER = os.environ.setdefault("URL_API_PROMPTLAYER", "https://api.promptlayer.com")
 
 
 async def arun_workflow_request(
@@ -237,10 +235,7 @@ def convert_native_object_to_dict(native_object):
     if isinstance(native_object, Enum):
         return native_object.value
     if hasattr(native_object, "__dict__"):
-        return {
-            k: convert_native_object_to_dict(v)
-            for k, v in native_object.__dict__.items()
-        }
+        return {k: convert_native_object_to_dict(v) for k, v in native_object.__dict__.items()}
     return native_object
 
 
@@ -262,9 +257,7 @@ def promptlayer_api_request(
     if isinstance(response, dict) and hasattr(response, "to_dict_recursive"):
         response = response.to_dict_recursive()
     request_response = None
-    if hasattr(
-        response, "dict"
-    ):  # added this for anthropic 3.0 changes, they return a completion object
+    if hasattr(response, "dict"):  # added this for anthropic 3.0 changes, they return a completion object
         response = response.dict()
     try:
         request_response = requests.post(
@@ -371,9 +364,7 @@ def promptlayer_api_request_async(
     )
 
 
-def promptlayer_get_prompt(
-    prompt_name, api_key, version: int = None, label: str = None
-):
+def promptlayer_get_prompt(prompt_name, api_key, version: int = None, label: str = None):
     """
     Get a prompt from the PromptLayer library
     version: version of the prompt to get, None for latest
@@ -386,9 +377,7 @@ def promptlayer_get_prompt(
             params={"prompt_name": prompt_name, "version": version, "label": label},
         )
     except Exception as e:
-        raise Exception(
-            f"PromptLayer had the following error while getting your prompt: {e}"
-        )
+        raise Exception(f"PromptLayer had the following error while getting your prompt: {e}")
     if request_response.status_code != 200:
         raise_on_bad_response(
             request_response,
@@ -398,9 +387,7 @@ def promptlayer_get_prompt(
     return request_response.json()
 
 
-def promptlayer_publish_prompt(
-    prompt_name, prompt_template, commit_message, tags, api_key, metadata=None
-):
+def promptlayer_publish_prompt(prompt_name, prompt_template, commit_message, tags, api_key, metadata=None):
     try:
         request_response = requests.post(
             f"{URL_API_PROMPTLAYER}/library-publish-prompt-template",
@@ -414,9 +401,7 @@ def promptlayer_publish_prompt(
             },
         )
     except Exception as e:
-        raise Exception(
-            f"PromptLayer had the following error while publishing your prompt: {e}"
-        )
+        raise Exception(f"PromptLayer had the following error while publishing your prompt: {e}")
     if request_response.status_code != 200:
         raise_on_bad_response(
             request_response,
@@ -425,9 +410,7 @@ def promptlayer_publish_prompt(
     return True
 
 
-def promptlayer_track_prompt(
-    request_id, prompt_name, input_variables, api_key, version, label
-):
+def promptlayer_track_prompt(request_id, prompt_name, input_variables, api_key, version, label):
     try:
         request_response = requests.post(
             f"{URL_API_PROMPTLAYER}/library-track-prompt",
@@ -516,9 +499,7 @@ def promptlayer_track_metadata(request_id, metadata, api_key):
     return True
 
 
-async def apromptlayer_track_metadata(
-    request_id: str, metadata: Dict[str, Any], api_key: Optional[str] = None
-) -> bool:
+async def apromptlayer_track_metadata(request_id: str, metadata: Dict[str, Any], api_key: Optional[str] = None) -> bool:
     url = f"{URL_API_PROMPTLAYER}/library-track-metadata"
     payload = {
         "request_id": request_id,
@@ -649,9 +630,7 @@ class GeneratorProxy:
 
     def __getattr__(self, name):
        if name == "text_stream":  # anthropic async stream
-            return GeneratorProxy(
-                self.generator.text_stream, self.api_request_arugments, self.api_key
-            )
+            return GeneratorProxy(self.generator.text_stream, self.api_request_arugments, self.api_key)
         return getattr(self.generator, name)
 
     def _abstracted_next(self, result):
@@ -668,8 +647,7 @@ class GeneratorProxy:
             end_anthropic = True
 
         end_openai = provider_type == "openai" and (
-            result.choices[0].finish_reason == "stop"
-            or result.choices[0].finish_reason == "length"
+            result.choices[0].finish_reason == "stop" or result.choices[0].finish_reason == "length"
         )
 
         if end_anthropic or end_openai:
@@ -684,9 +662,7 @@ class GeneratorProxy:
                 request_end_time=self.api_request_arugments["request_end_time"],
                 api_key=self.api_key,
                 return_pl_id=self.api_request_arugments["return_pl_id"],
-                llm_request_span_id=self.api_request_arugments.get(
-                    "llm_request_span_id"
-                ),
+                llm_request_span_id=self.api_request_arugments.get("llm_request_span_id"),
             )
 
         if self.api_request_arugments["return_pl_id"]:
@@ -716,8 +692,7 @@ class GeneratorProxy:
         elif hasattr(result, "delta") and hasattr(result.delta, "text"):
             response = f"{response}{result.delta.text}"
         if (
-            hasattr(self.results[-1], "type")
-            and self.results[-1].type == "message_stop"
+            hasattr(self.results[-1], "type") and self.results[-1].type == "message_stop"
         ):  # this is a message stream and not the correct event
             final_result = deepcopy(self.results[0].message)
             final_result.usage = None
@@ -735,23 +710,15 @@ class GeneratorProxy:
             final_result = deepcopy(self.results[-1])
             final_result.choices[0].text = response
             return final_result
-        elif hasattr(
-            self.results[0].choices[0], "delta"
-        ):  # this is completion with delta
+        elif hasattr(self.results[0].choices[0], "delta"):  # this is completion with delta
             response = {"role": "", "content": ""}
             for result in self.results:
-                if (
-                    hasattr(result.choices[0].delta, "role")
-                    and result.choices[0].delta.role is not None
-                ):
+                if hasattr(result.choices[0].delta, "role") and result.choices[0].delta.role is not None:
                     response["role"] = result.choices[0].delta.role
-                if (
-                    hasattr(result.choices[0].delta, "content")
-                    and result.choices[0].delta.content is not None
-                ):
-                    response["content"] = response[
-                        "content"
-                    ] = f"{response['content']}{result.choices[0].delta.content}"
+                if hasattr(result.choices[0].delta, "content") and result.choices[0].delta.content is not None:
+                    response["content"] = response["content"] = (
+                        f"{response['content']}{result.choices[0].delta.content}"
+                    )
             final_result = deepcopy(self.results[-1])
             final_result.choices[0] = response
             return final_result
@@ -854,9 +821,7 @@ def promptlayer_create_group(api_key: str = None):
         return False
     except requests.exceptions.RequestException as e:
         # I'm aiming for a more specific exception catch here
-        raise Exception(
-            f"PromptLayer had the following error while creating your group: {e}"
-        )
+        raise Exception(f"PromptLayer had the following error while creating your group: {e}")
     return request_response.json()["id"]
 
 
@@ -877,9 +842,7 @@ async def apromptlayer_create_group(api_key: Optional[str] = None) -> str:
             return False
         return response.json()["id"]
     except httpx.RequestError as e:
-        raise Exception(
-            f"PromptLayer had the following error while creating your group: {str(e)}"
-        ) from e
+        raise Exception(f"PromptLayer had the following error while creating your group: {str(e)}") from e
 
 
 def promptlayer_track_group(request_id, group_id, api_key: str = None):
@@ -900,9 +863,7 @@ def promptlayer_track_group(request_id, group_id, api_key: str = None):
         return False
     except requests.exceptions.RequestException as e:
         # I'm aiming for a more specific exception catch here
-        raise Exception(
-            f"PromptLayer had the following error while tracking your group: {e}"
-        )
+        raise Exception(f"PromptLayer had the following error while tracking your group: {e}")
     return True
 
 
@@ -948,9 +909,7 @@ def get_prompt_template(
             json=json_body,
         )
         if response.status_code != 200:
-            raise Exception(
-                f"PromptLayer had the following error while getting your prompt template: {response.text}"
-            )
+            raise Exception(f"PromptLayer had the following error while getting your prompt template: {response.text}")
 
         warning = response.json().get("warning", None)
         if warning is not None:
@@ -960,9 +919,7 @@ def get_prompt_template(
             )
         return response.json()
     except requests.exceptions.RequestException as e:
-        raise Exception(
-            f"PromptLayer had the following error while getting your prompt template: {e}"
-        )
+        raise Exception(f"PromptLayer had the following error while getting your prompt template: {e}")
 
 
 async def aget_prompt_template(
@@ -993,9 +950,7 @@ async def aget_prompt_template(
         )
         return response.json()
     except httpx.RequestError as e:
-        raise Exception(
-            f"PromptLayer had the following error while getting your prompt template: {str(e)}"
-        ) from e
+        raise Exception(f"PromptLayer had the following error while getting your prompt template: {str(e)}") from e
 
 
 def publish_prompt_template(
@@ -1018,9 +973,7 @@ def publish_prompt_template(
         )
         return response.json()
     except requests.exceptions.RequestException as e:
-        raise Exception(
-            f"PromptLayer had the following error while publishing your prompt template: {e}"
-        )
+        raise Exception(f"PromptLayer had the following error while publishing your prompt template: {e}")
 
 
 async def apublish_prompt_template(
@@ -1049,9 +1002,7 @@ async def apublish_prompt_template(
         )
         return response.json()
     except httpx.RequestError as e:
-        raise Exception(
-            f"PromptLayer had the following error while publishing your prompt template: {str(e)}"
-        ) from e
+        raise Exception(f"PromptLayer had the following error while publishing your prompt template: {str(e)}") from e
 
 
 def get_all_prompt_templates(
@@ -1070,9 +1021,7 @@ def get_all_prompt_templates(
         items = response.json().get("items", [])
         return items
     except requests.exceptions.RequestException as e:
-        raise Exception(
-            f"PromptLayer had the following error while getting all your prompt templates: {e}"
-        )
+        raise Exception(f"PromptLayer had the following error while getting all your prompt templates: {e}")
 
 
 async def aget_all_prompt_templates(
@@ -1093,9 +1042,7 @@ async def aget_all_prompt_templates(
         items = response.json().get("items", [])
         return items
     except httpx.RequestError as e:
-        raise Exception(
-            f"PromptLayer had the following error while getting all your prompt templates: {str(e)}"
-        ) from e
+        raise Exception(f"PromptLayer had the following error while getting all your prompt templates: {str(e)}") from e
 
 
 def openai_stream_chat(results: list):
@@ -1158,10 +1105,10 @@ def openai_stream_chat(results: list):
                     )
                 )
                 continue
-            last_tool_call.function.name = (
-                f"{last_tool_call.function.name}{tool_call.function.name or ''}"
+            last_tool_call.function.name = f"{last_tool_call.function.name}{tool_call.function.name or ''}"
+            last_tool_call.function.arguments = (
+                f"{last_tool_call.function.arguments}{tool_call.function.arguments or ''}"
             )
-            last_tool_call.function.arguments = f"{last_tool_call.function.arguments}{tool_call.function.arguments or ''}"
 
     response.choices[0].message.content = content
     response.choices[0].message.tool_calls = tool_calls
@@ -1224,10 +1171,10 @@ async def aopenai_stream_chat(generator: AsyncIterable[Any]) -> Any:
                    )
                 )
                 continue
-            last_tool_call.function.name = (
-                f"{last_tool_call.function.name}{tool_call.function.name or ''}"
+            last_tool_call.function.name = f"{last_tool_call.function.name}{tool_call.function.name or ''}"
+            last_tool_call.function.arguments = (
+                f"{last_tool_call.function.arguments}{tool_call.function.arguments or ''}"
             )
-            last_tool_call.function.arguments = f"{last_tool_call.function.arguments}{tool_call.function.arguments or ''}"
 
     # After collecting all chunks, set the response attributes
     if chat_completion_chunks:
@@ -1414,9 +1361,7 @@ async def aanthropic_stream_completion(generator: AsyncIterable[Any]) -> Any:
     return response
 
 
-def stream_response(
-    generator: Generator, after_stream: Callable, map_results: Callable
-):
+def stream_response(generator: Generator, after_stream: Callable, map_results: Callable):
     data = {
         "request_id": None,
         "raw_response": None,
@@ -1455,9 +1400,7 @@ async def astream_response(
         yield item
 
     request_response = await map_results(async_generator_from_list(results))
-    after_stream_response = await after_stream(
-        request_response=request_response.model_dump()
-    )
+    after_stream_response = await after_stream(request_response=request_response.model_dump())
     data["request_id"] = after_stream_response.get("request_id")
     data["prompt_blueprint"] = after_stream_response.get("prompt_blueprint")
     yield data
@@ -1481,9 +1424,7 @@ def openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
     from openai import OpenAI
 
     client = OpenAI(base_url=kwargs.pop("base_url", None))
-    request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[
-        prompt_blueprint["prompt_template"]["type"]
-    ]
+    request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
     return request_to_make(client, **kwargs)
 
 
@@ -1505,9 +1446,7 @@ async def aopenai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs)
     from openai import AsyncOpenAI
 
     client = AsyncOpenAI(base_url=kwargs.pop("base_url", None))
-    request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[
-        prompt_blueprint["prompt_template"]["type"]
-    ]
+    request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
    return await request_to_make(client, **kwargs)
 
 
@@ -1515,9 +1454,7 @@ def azure_openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
     from openai import AzureOpenAI
 
     client = AzureOpenAI(azure_endpoint=kwargs.pop("base_url", None))
-    request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[
-        prompt_blueprint["prompt_template"]["type"]
-    ]
+    request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
     return request_to_make(client, **kwargs)
 
 
@@ -1525,9 +1462,7 @@ async def aazure_openai_request(prompt_blueprint: GetPromptTemplateResponse, **k
     from openai import AsyncAzureOpenAI
 
     client = AsyncAzureOpenAI(azure_endpoint=kwargs.pop("base_url", None))
-    request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[
-        prompt_blueprint["prompt_template"]["type"]
-    ]
+    request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
     return await request_to_make(client, **kwargs)
 
 
@@ -1549,9 +1484,7 @@ def anthropic_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
     from anthropic import Anthropic
 
     client = Anthropic(base_url=kwargs.pop("base_url", None))
-    request_to_make = MAP_TYPE_TO_ANTHROPIC_FUNCTION[
-        prompt_blueprint["prompt_template"]["type"]
-    ]
+    request_to_make = MAP_TYPE_TO_ANTHROPIC_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
     return request_to_make(client, **kwargs)
 
 
@@ -1573,9 +1506,7 @@ async def aanthropic_request(prompt_blueprint: GetPromptTemplateResponse, **kwar
     from anthropic import AsyncAnthropic
 
     client = AsyncAnthropic(base_url=kwargs.pop("base_url", None))
-    request_to_make = AMAP_TYPE_TO_ANTHROPIC_FUNCTION[
-        prompt_blueprint["prompt_template"]["type"]
-    ]
+    request_to_make = AMAP_TYPE_TO_ANTHROPIC_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
     return await request_to_make(client, **kwargs)
 
 
@@ -1715,11 +1646,11 @@ def mistral_stream_chat(results: list):
             else:
                 last_tool_call = tool_calls[-1]
                 if tool_call.function.name:
-                    last_tool_call.function.name = (
-                        f"{last_tool_call.function.name}{tool_call.function.name}"
-                    )
+                    last_tool_call.function.name = f"{last_tool_call.function.name}{tool_call.function.name}"
                 if tool_call.function.arguments:
-                    last_tool_call.function.arguments = f"{last_tool_call.function.arguments}{tool_call.function.arguments}"
+                    last_tool_call.function.arguments = (
+                        f"{last_tool_call.function.arguments}{tool_call.function.arguments}"
+                    )
 
     response.choices[0].message.content = content
     response.choices[0].message.tool_calls = tool_calls
@@ -1779,11 +1710,11 @@ async def amistral_stream_chat(generator: AsyncIterable[Any]) -> Any:
             else:
                 last_tool_call = tool_calls[-1]
                 if tool_call.function.name:
-                    last_tool_call.function.name = (
-                        f"{last_tool_call.function.name}{tool_call.function.name}"
-                    )
+                    last_tool_call.function.name = f"{last_tool_call.function.name}{tool_call.function.name}"
                 if tool_call.function.arguments:
-                    last_tool_call.function.arguments = f"{last_tool_call.function.arguments}{tool_call.function.arguments}"
+                    last_tool_call.function.arguments = (
+                        f"{last_tool_call.function.arguments}{tool_call.function.arguments}"
+                    )
 
     if completion_chunks:
         last_result = completion_chunks[-1]
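Several of the utils.py hunks above reformat the code that stitches streamed chat chunks back into a single message by concatenating `delta` content and tool-call fragments. A standalone sketch of that reassembly, using plain dicts invented for illustration rather than the provider response objects the library actually handles:

```python
# Reassemble a streamed chat completion: each chunk carries a small delta, and
# the final message is the concatenation of those deltas.
def join_stream(chunks):
    message = {"role": "", "content": ""}
    for chunk in chunks:
        delta = chunk["delta"]
        if delta.get("role") is not None:
            message["role"] = delta["role"]
        if delta.get("content") is not None:
            message["content"] = f"{message['content']}{delta['content']}"
    return message


chunks = [
    {"delta": {"role": "assistant", "content": ""}},
    {"delta": {"content": "Hello"}},
    {"delta": {"content": ", world"}},
    {"delta": {}},  # a final chunk often carries only a finish reason
]
print(join_stream(chunks))  # {'role': 'assistant', 'content': 'Hello, world'}
```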
{promptlayer-1.0.37 → promptlayer-1.0.38}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "promptlayer"
-version = "1.0.37"
+version = "1.0.38"
 description = "PromptLayer is a platform for prompt engineering and tracks your LLM requests."
 authors = ["Magniv <hello@magniv.io>"]
 license = "Apache-2.0"
@@ -28,3 +28,22 @@ openai = "^1.60.1"
 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
+
+[tool.ruff]
+line-length = 120
+indent-width = 4  # mimic Black
+target-version = "py38"
+
+[tool.ruff.lint]
+ignore = ["E501", "E711", "E712"]
+
+[tool.ruff.lint.isort]
+combine-as-imports = true
+relative-imports-order = "closest-to-furthest"
+known-first-party = ["promptlayer", "tests"]
+
+[tool.ruff.format]
+quote-style = "double"  # mimic Black
+indent-style = "space"  # also mimic Black
+skip-magic-trailing-comma = false  # also mimic Black
+line-ending = "auto"  # mimic Black