promptlayer 1.0.20__tar.gz → 1.0.22__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of promptlayer might be problematic; see the registry page for details.
- {promptlayer-1.0.20 → promptlayer-1.0.22}/PKG-INFO +2 -1
- {promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/__init__.py +1 -1
- {promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/promptlayer.py +23 -1
- {promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/utils.py +49 -1
- {promptlayer-1.0.20 → promptlayer-1.0.22}/pyproject.toml +1 -1
- {promptlayer-1.0.20 → promptlayer-1.0.22}/LICENSE +0 -0
- {promptlayer-1.0.20 → promptlayer-1.0.22}/README.md +0 -0
- {promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/groups/__init__.py +0 -0
- {promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/groups/groups.py +0 -0
- {promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/promptlayer_base.py +0 -0
- {promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/span_exporter.py +0 -0
- {promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/templates.py +0 -0
- {promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/track/__init__.py +0 -0
- {promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/track/track.py +0 -0
- {promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/types/__init__.py +0 -0
- {promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/types/prompt_template.py +0 -0
- {promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/types/request_log.py +0 -0
{promptlayer-1.0.20 → promptlayer-1.0.22}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: promptlayer
-Version: 1.0.20
+Version: 1.0.22
 Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
 License: Apache-2.0
 Author: Magniv
@@ -12,6 +12,7 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: opentelemetry-api (>=1.26.0,<2.0.0)
 Requires-Dist: opentelemetry-sdk (>=1.26.0,<2.0.0)
 Requires-Dist: requests (>=2.31.0,<3.0.0)
```
{promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/promptlayer.py

```diff
@@ -3,7 +3,7 @@ import datetime
 import os
 from copy import deepcopy
 from functools import wraps
-from typing import Any, Dict, List, Literal, Union
+from typing import Any, Dict, List, Literal, Optional, Union

 from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.trace import TracerProvider
@@ -23,6 +23,7 @@ from promptlayer.utils import (
     openai_request,
     openai_stream_chat,
     openai_stream_completion,
+    run_workflow_request,
     stream_response,
     track_request,
     util_log_request,
@@ -358,6 +359,27 @@ class PromptLayer:
         else:
             return self._run_internal(**_run_internal_kwargs)

+    def run_workflow(
+        self,
+        workflow_name: str,
+        input_variables: Optional[Dict[str, Any]] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        workflow_label_name: Optional[str] = None,
+        workflow_version_number: Optional[int] = None,
+    ) -> Dict[str, Any]:
+        try:
+            result = run_workflow_request(
+                workflow_name=workflow_name,
+                input_variables=input_variables or {},
+                metadata=metadata,
+                workflow_label_name=workflow_label_name,
+                workflow_version_number=workflow_version_number,
+                api_key=self.api_key,
+            )
+            return result
+        except Exception as e:
+            raise Exception(f"Error running workflow: {str(e)}")
+
     def traceable(self, attributes=None, name=None):
         def decorator(func):
             @wraps(func)
```
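The main user-facing addition in this release is the `PromptLayer.run_workflow` method shown above. A minimal usage sketch follows; it assumes the client is constructed with an API key as usual, and the workflow name, inputs, and metadata below are placeholders rather than values taken from this diff:

```python
from promptlayer import PromptLayer

# Placeholder API key; in practice this is typically supplied via configuration.
pl = PromptLayer(api_key="pl_XXXXXXXX")

# Run a hosted workflow by name. All argument values here are illustrative.
result = pl.run_workflow(
    workflow_name="summarize-ticket",                          # placeholder workflow
    input_variables={"ticket_text": "App crashes on login."},  # workflow inputs
    metadata={"environment": "staging"},                       # optional metadata
    workflow_label_name=None,                                  # optionally pin a label
    workflow_version_number=None,                              # or a specific version
)

# run_workflow returns the parsed JSON response from the API, or raises
# Exception("Error running workflow: ...") on any failure.
print(result)
```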
{promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/utils.py

```diff
@@ -8,7 +8,7 @@ import sys
 import types
 from copy import deepcopy
 from enum import Enum
-from typing import Callable, Generator, List, Union
+from typing import Any, Callable, Dict, Generator, List, Optional, Union

 import requests
 from opentelemetry import context, trace
@@ -27,6 +27,47 @@ URL_API_PROMPTLAYER = os.environ.setdefault(
 )


+def run_workflow_request(
+    *,
+    workflow_name: str,
+    input_variables: Dict[str, Any],
+    metadata: Optional[Dict[str, str]] = None,
+    workflow_label_name: Optional[str] = None,
+    workflow_version_number: Optional[int] = None,
+    api_key: str,
+) -> Dict[str, Any]:
+    payload = {
+        "input_variables": input_variables,
+        "metadata": metadata,
+        "workflow_label_name": workflow_label_name,
+        "workflow_version_number": workflow_version_number,
+    }
+
+    url = f"{URL_API_PROMPTLAYER}/workflows/{workflow_name}/run"
+    headers = {"X-API-KEY": api_key}
+
+    try:
+        response = requests.post(url, json=payload, headers=headers)
+    except requests.exceptions.RequestException as e:
+        error_message = f"Failed to run workflow: {e}"
+        print(error_message, file=sys.stderr)
+        raise Exception(error_message)
+
+    if response.status_code != 201:
+        try:
+            error_details = response.json().get("error", "Unknown error")
+        except ValueError:
+            error_details = response.text or "Unknown error"
+
+        error_message = f"Failed to run workflow: {error_details}"
+        print(error_message, file=sys.stderr)
+        raise Exception(error_message)
+
+    result = response.json()
+
+    return result
+
+
 def promptlayer_api_handler(
     function_name,
     provider_type,
```
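For reference, `run_workflow_request` reduces to a single `POST` against the workflows endpoint. The sketch below re-creates that request with plain `requests`, assuming the default `URL_API_PROMPTLAYER` base URL of `https://api.promptlayer.com`; the workflow name, API key, and payload values are placeholders:

```python
import requests

BASE_URL = "https://api.promptlayer.com"  # assumed default for URL_API_PROMPTLAYER
WORKFLOW_NAME = "summarize-ticket"        # placeholder workflow name

response = requests.post(
    f"{BASE_URL}/workflows/{WORKFLOW_NAME}/run",
    headers={"X-API-KEY": "pl_XXXXXXXX"},  # placeholder API key
    json={
        "input_variables": {"ticket_text": "App crashes on login."},
        "metadata": {"environment": "staging"},
        "workflow_label_name": None,
        "workflow_version_number": None,
    },
)

# The helper above treats any status other than 201 as a failure and raises.
if response.status_code != 201:
    raise RuntimeError(f"Failed to run workflow: {response.text}")
print(response.json())
```

Note that the helper surfaces both transport errors and non-201 responses as exceptions, printing the error message to stderr before raising.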
{promptlayer-1.0.20 → promptlayer-1.0.22}/promptlayer/utils.py (continued)

```diff
@@ -666,6 +707,13 @@ def get_prompt_template(
             raise Exception(
                 f"PromptLayer had the following error while getting your prompt template: {response.text}"
             )
+
+        warning = response.json().get("warning", None)
+        if warning is not None:
+            warn_on_bad_response(
+                warning,
+                "WARNING: While getting your prompt template",
+            )
         return response.json()
     except requests.exceptions.RequestException as e:
         raise Exception(
```
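This last hunk makes `get_prompt_template` surface a `warning` field when the API includes one in an otherwise successful response, delegating to the existing `warn_on_bad_response` helper rather than failing the call. A standalone sketch of the same pattern (the function and sample payload below are illustrative, not part of the library):

```python
import sys
from typing import Any, Dict, Optional

def surface_warning(body: Dict[str, Any], prefix: str) -> Optional[str]:
    # Illustrative stand-in for the new check; the real code delegates to
    # promptlayer.utils.warn_on_bad_response instead of printing directly.
    warning = body.get("warning")
    if warning is not None:
        print(f"{prefix}: {warning}", file=sys.stderr)
    return warning

# Example response body that carries both a template and a server-side warning.
template_body = {
    "prompt_template": {"messages": []},
    "warning": "This prompt template version is deprecated.",
}
surface_warning(template_body, "WARNING: While getting your prompt template")
```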
All other files listed above are unchanged between 1.0.20 and 1.0.22.