promptlayer 1.0.21__tar.gz → 1.0.22__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of promptlayer might be problematic; review the release notes and the version diff below for details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: promptlayer
3
- Version: 1.0.21
3
+ Version: 1.0.22
4
4
  Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
5
5
  License: Apache-2.0
6
6
  Author: Magniv
@@ -12,6 +12,7 @@ Classifier: Programming Language :: Python :: 3.9
12
12
  Classifier: Programming Language :: Python :: 3.10
13
13
  Classifier: Programming Language :: Python :: 3.11
14
14
  Classifier: Programming Language :: Python :: 3.12
15
+ Classifier: Programming Language :: Python :: 3.13
15
16
  Requires-Dist: opentelemetry-api (>=1.26.0,<2.0.0)
16
17
  Requires-Dist: opentelemetry-sdk (>=1.26.0,<2.0.0)
17
18
  Requires-Dist: requests (>=2.31.0,<3.0.0)
@@ -1,4 +1,4 @@
1
1
  from .promptlayer import PromptLayer
2
2
 
3
- __version__ = "1.0.21"
3
+ __version__ = "1.0.22"
4
4
  __all__ = ["PromptLayer", "__version__"]
@@ -3,7 +3,7 @@ import datetime
3
3
  import os
4
4
  from copy import deepcopy
5
5
  from functools import wraps
6
- from typing import Any, Dict, List, Literal, Union
6
+ from typing import Any, Dict, List, Literal, Optional, Union
7
7
 
8
8
  from opentelemetry.sdk.resources import Resource
9
9
  from opentelemetry.sdk.trace import TracerProvider
@@ -23,6 +23,7 @@ from promptlayer.utils import (
23
23
  openai_request,
24
24
  openai_stream_chat,
25
25
  openai_stream_completion,
26
+ run_workflow_request,
26
27
  stream_response,
27
28
  track_request,
28
29
  util_log_request,
@@ -358,6 +359,27 @@ class PromptLayer:
358
359
  else:
359
360
  return self._run_internal(**_run_internal_kwargs)
360
361
 
362
def run_workflow(
    self,
    workflow_name: str,
    input_variables: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, str]] = None,
    workflow_label_name: Optional[str] = None,
    workflow_version_number: Optional[int] = None,
) -> Dict[str, Any]:
    """Run a PromptLayer workflow by name and return its result.

    Args:
        workflow_name: Name of the workflow to execute.
        input_variables: Variables passed to the workflow run; an empty
            dict is sent when omitted.
        metadata: Optional string-valued metadata attached to the run.
        workflow_label_name: Optional label selecting a workflow version.
        workflow_version_number: Optional explicit version number to run.

    Returns:
        The parsed JSON result returned by the PromptLayer workflow API.

    Raises:
        Exception: If the underlying request fails. The original error is
            chained as ``__cause__`` so the traceback is preserved.
    """
    try:
        return run_workflow_request(
            workflow_name=workflow_name,
            input_variables=input_variables or {},
            metadata=metadata,
            workflow_label_name=workflow_label_name,
            workflow_version_number=workflow_version_number,
            api_key=self.api_key,
        )
    except Exception as e:
        # Chain the original exception (PEP 3134) instead of discarding it,
        # so callers can still see the root cause in the traceback.
        raise Exception(f"Error running workflow: {str(e)}") from e
382
+
361
383
  def traceable(self, attributes=None, name=None):
362
384
  def decorator(func):
363
385
  @wraps(func)
@@ -8,7 +8,7 @@ import sys
8
8
  import types
9
9
  from copy import deepcopy
10
10
  from enum import Enum
11
- from typing import Callable, Generator, List, Union
11
+ from typing import Any, Callable, Dict, Generator, List, Optional, Union
12
12
 
13
13
  import requests
14
14
  from opentelemetry import context, trace
@@ -27,6 +27,47 @@ URL_API_PROMPTLAYER = os.environ.setdefault(
27
27
  )
28
28
 
29
29
 
30
def run_workflow_request(
    *,
    workflow_name: str,
    input_variables: Dict[str, Any],
    metadata: Optional[Dict[str, str]] = None,
    workflow_label_name: Optional[str] = None,
    workflow_version_number: Optional[int] = None,
    api_key: str,
    timeout: Optional[float] = None,
) -> Dict[str, Any]:
    """POST to the PromptLayer workflow-run endpoint and return its JSON body.

    Args:
        workflow_name: Name of the workflow; interpolated into the URL path.
        input_variables: Variables forwarded to the workflow run.
        metadata: Optional string-valued metadata for the run.
        workflow_label_name: Optional label selecting a workflow version.
        workflow_version_number: Optional explicit version number to run.
        api_key: PromptLayer API key, sent in the ``X-API-KEY`` header.
        timeout: Optional requests timeout in seconds. Defaults to None
            (no timeout) to preserve prior behavior; pass a value to avoid
            hanging forever on an unresponsive server.

    Returns:
        The decoded JSON response of a successful (HTTP 201) run.

    Raises:
        Exception: On transport errors, non-201 responses, or a 201 response
            whose body is not valid JSON. The message is also printed to
            stderr, matching the module's existing error-reporting style.
    """
    payload = {
        "input_variables": input_variables,
        "metadata": metadata,
        "workflow_label_name": workflow_label_name,
        "workflow_version_number": workflow_version_number,
    }

    url = f"{URL_API_PROMPTLAYER}/workflows/{workflow_name}/run"
    headers = {"X-API-KEY": api_key}

    try:
        response = requests.post(url, json=payload, headers=headers, timeout=timeout)
    except requests.exceptions.RequestException as e:
        error_message = f"Failed to run workflow: {e}"
        print(error_message, file=sys.stderr)
        # Chain the transport error so the root cause survives the re-raise.
        raise Exception(error_message) from e

    # The endpoint signals a successful run with 201 Created.
    if response.status_code != 201:
        try:
            error_details = response.json().get("error", "Unknown error")
        except ValueError:
            # Non-JSON error body: fall back to the raw text.
            error_details = response.text or "Unknown error"

        error_message = f"Failed to run workflow: {error_details}"
        print(error_message, file=sys.stderr)
        raise Exception(error_message)

    try:
        result = response.json()
    except ValueError as e:
        # A 201 with a non-JSON body would otherwise leak a bare ValueError;
        # surface it through the same error channel as every other failure.
        error_message = f"Failed to run workflow: invalid JSON response: {e}"
        print(error_message, file=sys.stderr)
        raise Exception(error_message) from e

    return result
69
+
70
+
30
71
  def promptlayer_api_handler(
31
72
  function_name,
32
73
  provider_type,
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "promptlayer"
3
- version = "1.0.21"
3
+ version = "1.0.22"
4
4
  description = "PromptLayer is a platform for prompt engineering and tracks your LLM requests."
5
5
  authors = ["Magniv <hello@magniv.io>"]
6
6
  license = "Apache-2.0"
File without changes
File without changes