promptlayer 1.0.21.tar.gz → 1.0.23.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of promptlayer has been flagged as potentially problematic by the registry.

--- promptlayer-1.0.21/PKG-INFO
+++ promptlayer-1.0.23/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: promptlayer
-Version: 1.0.21
+Version: 1.0.23
 Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
 License: Apache-2.0
 Author: Magniv
@@ -12,6 +12,7 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: opentelemetry-api (>=1.26.0,<2.0.0)
 Requires-Dist: opentelemetry-sdk (>=1.26.0,<2.0.0)
 Requires-Dist: requests (>=2.31.0,<3.0.0)
--- promptlayer-1.0.21/promptlayer/__init__.py
+++ promptlayer-1.0.23/promptlayer/__init__.py
@@ -1,4 +1,4 @@
 from .promptlayer import PromptLayer
 
-__version__ = "1.0.21"
+__version__ = "1.0.23"
 __all__ = ["PromptLayer", "__version__"]
--- promptlayer-1.0.21/promptlayer/promptlayer.py
+++ promptlayer-1.0.23/promptlayer/promptlayer.py
@@ -3,7 +3,7 @@ import datetime
 import os
 from copy import deepcopy
 from functools import wraps
-from typing import Any, Dict, List, Literal, Union
+from typing import Any, Dict, List, Literal, Optional, Union
 
 from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.trace import TracerProvider
@@ -20,9 +20,11 @@ from promptlayer.utils import (
     anthropic_request,
     anthropic_stream_completion,
     anthropic_stream_message,
+    azure_openai_request,
     openai_request,
     openai_stream_chat,
     openai_stream_completion,
+    run_workflow_request,
     stream_response,
     track_request,
     util_log_request,
@@ -49,11 +51,22 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": anthropic_stream_completion,
         },
     },
+    "openai.azure": {
+        "chat": {
+            "function_name": "openai.AzureOpenAI.chat.completions.create",
+            "stream_function": openai_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.AzureOpenAI.completions.create",
+            "stream_function": openai_stream_completion,
+        },
+    },
 }
 
 MAP_PROVIDER_TO_FUNCTION = {
     "openai": openai_request,
     "anthropic": anthropic_request,
+    "openai.azure": azure_openai_request,
 }
 
 
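The two dictionaries form the provider dispatch table: MAP_PROVIDER_TO_FUNCTION selects the request function for a provider, while MAP_PROVIDER_TO_FUNCTION_NAME supplies the function name to log plus the stream aggregator per template type. A minimal sketch of how a call site might consult them (the lookup code here is illustrative, not taken from this diff):

provider = "openai.azure"
template_type = "chat"  # or "completion"

request_fn = MAP_PROVIDER_TO_FUNCTION[provider]  # azure_openai_request
config = MAP_PROVIDER_TO_FUNCTION_NAME[provider][template_type]
logged_name = config["function_name"]   # "openai.AzureOpenAI.chat.completions.create"
aggregator = config["stream_function"]  # openai_stream_chat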
@@ -169,7 +182,7 @@ class PromptLayer:
             kwargs["base_url"] = provider_base_url["url"]
 
         kwargs["stream"] = stream
-        if stream and provider == "openai":
+        if stream and provider in ["openai", "openai.azure"]:
            kwargs["stream_options"] = {"include_usage": True}
 
         return {
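With stream_options set this way, OpenAI-compatible streams (now including Azure) emit a final chunk whose usage field carries the token totals, which is what lets the stream aggregators copy last_result.usage onto the rebuilt response. A hedged sketch against the openai client (model name illustrative; assumes OPENAI_API_KEY is set in the environment):

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY in the environment
stream = client.chat.completions.create(
    model="gpt-4o-mini",  # illustrative model name
    messages=[{"role": "user", "content": "hi"}],
    stream=True,
    stream_options={"include_usage": True},
)
last = None
for chunk in stream:
    last = chunk  # the final chunk has empty choices and a populated usage
print(last.usage)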
@@ -358,6 +371,27 @@ class PromptLayer:
         else:
             return self._run_internal(**_run_internal_kwargs)
 
+    def run_workflow(
+        self,
+        workflow_name: str,
+        input_variables: Optional[Dict[str, Any]] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        workflow_label_name: Optional[str] = None,
+        workflow_version_number: Optional[int] = None,
+    ) -> Dict[str, Any]:
+        try:
+            result = run_workflow_request(
+                workflow_name=workflow_name,
+                input_variables=input_variables or {},
+                metadata=metadata,
+                workflow_label_name=workflow_label_name,
+                workflow_version_number=workflow_version_number,
+                api_key=self.api_key,
+            )
+            return result
+        except Exception as e:
+            raise Exception(f"Error running workflow: {str(e)}")
+
     def traceable(self, attributes=None, name=None):
         def decorator(func):
             @wraps(func)
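A minimal usage sketch for the new method; the workflow name and input variables are hypothetical, and the client is constructed with an API key as in earlier releases:

import os

from promptlayer import PromptLayer

pl = PromptLayer(api_key=os.environ["PROMPTLAYER_API_KEY"])
result = pl.run_workflow(
    workflow_name="my-workflow",                # hypothetical workflow
    input_variables={"topic": "unit testing"},  # hypothetical inputs
)
print(result)  # JSON body returned by the workflows run endpoint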
--- promptlayer-1.0.21/promptlayer/utils.py
+++ promptlayer-1.0.23/promptlayer/utils.py
@@ -8,7 +8,7 @@ import sys
 import types
 from copy import deepcopy
 from enum import Enum
-from typing import Callable, Generator, List, Union
+from typing import Any, Callable, Dict, Generator, List, Optional, Union
 
 import requests
 from opentelemetry import context, trace
@@ -27,6 +27,47 @@ URL_API_PROMPTLAYER = os.environ.setdefault(
 )
 
 
+def run_workflow_request(
+    *,
+    workflow_name: str,
+    input_variables: Dict[str, Any],
+    metadata: Optional[Dict[str, str]] = None,
+    workflow_label_name: Optional[str] = None,
+    workflow_version_number: Optional[int] = None,
+    api_key: str,
+) -> Dict[str, Any]:
+    payload = {
+        "input_variables": input_variables,
+        "metadata": metadata,
+        "workflow_label_name": workflow_label_name,
+        "workflow_version_number": workflow_version_number,
+    }
+
+    url = f"{URL_API_PROMPTLAYER}/workflows/{workflow_name}/run"
+    headers = {"X-API-KEY": api_key}
+
+    try:
+        response = requests.post(url, json=payload, headers=headers)
+    except requests.exceptions.RequestException as e:
+        error_message = f"Failed to run workflow: {e}"
+        print(error_message, file=sys.stderr)
+        raise Exception(error_message)
+
+    if response.status_code != 201:
+        try:
+            error_details = response.json().get("error", "Unknown error")
+        except ValueError:
+            error_details = response.text or "Unknown error"
+
+        error_message = f"Failed to run workflow: {error_details}"
+        print(error_message, file=sys.stderr)
+        raise Exception(error_message)
+
+    result = response.json()
+
+    return result
+
+
 def promptlayer_api_handler(
     function_name,
     provider_type,
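For reference, the helper amounts to a single POST against the workflows endpoint, with success signalled by HTTP 201 and a JSON body. A hedged equivalent using requests directly (URL, key, and inputs are illustrative; the real base URL comes from URL_API_PROMPTLAYER):

import requests

resp = requests.post(
    "https://api.promptlayer.com/workflows/my-workflow/run",  # illustrative URL
    json={
        "input_variables": {"topic": "unit testing"},  # hypothetical inputs
        "metadata": None,
        "workflow_label_name": None,
        "workflow_version_number": None,
    },
    headers={"X-API-KEY": "pl_..."},  # placeholder key
)
resp.raise_for_status()  # the SDK itself checks specifically for 201
print(resp.json())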
@@ -731,8 +772,10 @@ def openai_stream_chat(results: list):
         ChatCompletion,
         ChatCompletionChunk,
         ChatCompletionMessage,
+        ChatCompletionMessageToolCall,
     )
     from openai.types.chat.chat_completion import Choice
+    from openai.types.chat.chat_completion_message_tool_call import Function
 
     chat_completion_chunks: List[ChatCompletionChunk] = results
     response: ChatCompletion = ChatCompletion(
@@ -755,10 +798,42 @@
     response.system_fingerprint = last_result.system_fingerprint
     response.usage = last_result.usage
     content = ""
+    tool_calls: Union[List[ChatCompletionMessageToolCall], None] = None
     for result in chat_completion_chunks:
-        if len(result.choices) > 0 and result.choices[0].delta.content:
+        choices = result.choices
+        if len(choices) == 0:
+            continue
+        if choices[0].delta.content:
             content = f"{content}{result.choices[0].delta.content}"
+
+        delta = choices[0].delta
+        if delta.tool_calls:
+            tool_calls = tool_calls or []
+            last_tool_call = None
+            if len(tool_calls) > 0:
+                last_tool_call = tool_calls[-1]
+            tool_call = delta.tool_calls[0]
+            if not tool_call.function:
+                continue
+            if not last_tool_call or tool_call.id:
+                tool_calls.append(
+                    ChatCompletionMessageToolCall(
+                        id=tool_call.id or "",
+                        function=Function(
+                            name=tool_call.function.name or "",
+                            arguments=tool_call.function.arguments or "",
+                        ),
+                        type=tool_call.type or "function",
+                    )
+                )
+                continue
+            last_tool_call.function.name = (
+                f"{last_tool_call.function.name}{tool_call.function.name or ''}"
+            )
+            last_tool_call.function.arguments = f"{last_tool_call.function.arguments}{tool_call.function.arguments or ''}"
+
     response.choices[0].message.content = content
+    response.choices[0].message.tool_calls = tool_calls
     return response
 
 
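The new loop reconstructs complete tool calls from streamed deltas: a delta carrying a tool-call id starts a new entry, and id-less deltas append their name and argument fragments onto the most recent one. A standalone sketch of the same merging rule, using simplified stand-in types rather than the openai classes:

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class FakeFunction:  # stand-in for openai's Function type
    name: str = ""
    arguments: str = ""

@dataclass
class FakeDelta:  # stand-in for a streamed tool-call delta
    id: Optional[str] = None
    function: Optional[FakeFunction] = None

def merge_tool_calls(deltas: List[FakeDelta]) -> List[FakeFunction]:
    calls: List[FakeFunction] = []
    for d in deltas:
        if d.function is None:
            continue
        if not calls or d.id:  # an id marks the start of a new tool call
            calls.append(FakeFunction(d.function.name or "", d.function.arguments or ""))
        else:  # otherwise accumulate fragments onto the last call
            calls[-1].name += d.function.name or ""
            calls[-1].arguments += d.function.arguments or ""
    return calls

deltas = [
    FakeDelta(id="call_1", function=FakeFunction(name="get_weather")),
    FakeDelta(function=FakeFunction(arguments='{"city": ')),
    FakeDelta(function=FakeFunction(arguments='"NYC"}')),
]
print(merge_tool_calls(deltas))
# [FakeFunction(name='get_weather', arguments='{"city": "NYC"}')]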
@@ -882,6 +957,16 @@ def openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
     return request_to_make(client, **kwargs)
 
 
+def azure_openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+    from openai import AzureOpenAI
+
+    client = AzureOpenAI(base_url=kwargs.pop("base_url", None))
+    request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[
+        prompt_blueprint["prompt_template"]["type"]
+    ]
+    return request_to_make(client, **kwargs)
+
+
 def anthropic_chat_request(client, **kwargs):
     return client.messages.create(**kwargs)
 
--- promptlayer-1.0.21/pyproject.toml
+++ promptlayer-1.0.23/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "promptlayer"
-version = "1.0.21"
+version = "1.0.23"
 description = "PromptLayer is a platform for prompt engineering and tracks your LLM requests."
 authors = ["Magniv <hello@magniv.io>"]
 license = "Apache-2.0"