promptlayer 1.0.2.tar.gz → 1.0.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: promptlayer
- Version: 1.0.2
+ Version: 1.0.3
  Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
  License: Apache-2.0
  Author: Magniv
@@ -0,0 +1,169 @@
+ import datetime
+ import os
+ from copy import deepcopy
+ from typing import Dict, List, Literal, Union
+
+ from promptlayer.groups import GroupManager
+ from promptlayer.promptlayer import PromptLayerBase
+ from promptlayer.templates import TemplateManager
+ from promptlayer.track import TrackManager
+ from promptlayer.types.prompt_template import GetPromptTemplate
+ from promptlayer.utils import (
+     anthropic_request,
+     anthropic_stream_completion,
+     anthropic_stream_message,
+     openai_request,
+     openai_stream_chat,
+     openai_stream_completion,
+     stream_response,
+     track_request,
+ )
+
+ MAP_PROVIDER_TO_FUNCTION_NAME = {
+     "openai": {
+         "chat": {
+             "function_name": "openai.chat.completions.create",
+             "stream_function": openai_stream_chat,
+         },
+         "completion": {
+             "function_name": "openai.completions.create",
+             "stream_function": openai_stream_completion,
+         },
+     },
+     "anthropic": {
+         "chat": {
+             "function_name": "anthropic.messages.create",
+             "stream_function": anthropic_stream_message,
+         },
+         "completion": {
+             "function_name": "anthropic.completions.create",
+             "stream_function": anthropic_stream_completion,
+         },
+     },
+ }
+
+ MAP_PROVIDER_TO_FUNCTION = {
+     "openai": openai_request,
+     "anthropic": anthropic_request,
+ }
+
+
+ class PromptLayer:
+     def __init__(self, api_key: str = None):
+         if api_key is None:
+             api_key = os.environ.get("PROMPTLAYER_API_KEY")
+         if api_key is None:
+             raise ValueError(
+                 "PromptLayer API key not provided. Please set the PROMPTLAYER_API_KEY environment variable or pass the api_key parameter."
+             )
+         self.api_key = api_key
+         self.templates = TemplateManager(api_key)
+         self.group = GroupManager(api_key)
+         self.track = TrackManager(api_key)
+
+     def __getattr__(
+         self,
+         name: Union[Literal["openai"], Literal["anthropic"], Literal["prompts"]],
+     ):
+         if name == "openai":
+             import openai as openai_module
+
+             openai = PromptLayerBase(
+                 openai_module, function_name="openai", api_key=self.api_key
+             )
+             return openai
+         elif name == "anthropic":
+             import anthropic as anthropic_module
+
+             anthropic = PromptLayerBase(
+                 anthropic_module,
+                 function_name="anthropic",
+                 provider_type="anthropic",
+                 api_key=self.api_key,
+             )
+             return anthropic
+         else:
+             raise AttributeError(f"module {__name__} has no attribute {name}")
+
+     def run(
+         self,
+         prompt_name: str,
+         prompt_version: Union[int, None] = None,
+         prompt_release_label: Union[str, None] = None,
+         input_variables: Union[Dict[str, str], None] = None,
+         tags: Union[List[str], None] = None,
+         metadata: Union[Dict[str, str], None] = None,
+         group_id: Union[int, None] = None,
+         stream=False,
+     ):
+         template_get_params: GetPromptTemplate = {}
+         if prompt_version:
+             template_get_params["version"] = prompt_version
+         if prompt_release_label:
+             template_get_params["label"] = prompt_release_label
+         if input_variables:
+             template_get_params["input_variables"] = input_variables
+         prompt_blueprint = self.templates.get(prompt_name, template_get_params)
+         prompt_template = prompt_blueprint["prompt_template"]
+         if not prompt_blueprint["llm_kwargs"]:
+             raise Exception(
+                 f"Prompt '{prompt_name}' does not have any LLM kwargs associated with it."
+             )
+         prompt_blueprint_metadata = prompt_blueprint.get("metadata", None)
+         if prompt_blueprint_metadata is None:
+             raise Exception(
+                 f"Prompt '{prompt_name}' does not have any metadata associated with it."
+             )
+         prompt_blueprint_model = prompt_blueprint_metadata.get("model", None)
+         if prompt_blueprint_model is None:
+             raise Exception(
+                 f"Prompt '{prompt_name}' does not have model parameters associated with it."
+             )
+         provider = prompt_blueprint_model["provider"]
+         request_start_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
+         kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
+         config = MAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
+         function_name = config["function_name"]
+         stream_function = config["stream_function"]
+         request_function = MAP_PROVIDER_TO_FUNCTION[provider]
+         provider_base_url = prompt_blueprint.get("provider_base_url", None)
+         if provider_base_url:
+             kwargs["base_url"] = provider_base_url["url"]
+         kwargs["stream"] = stream
+         if stream and provider == "openai":
+             kwargs["stream_options"] = {"include_usage": True}
+         response = request_function(prompt_blueprint, **kwargs)
+
+         def _track_request(**body):
+             request_end_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
+             return track_request(
+                 function_name=function_name,
+                 provider_type=provider,
+                 args=[],
+                 kwargs=kwargs,
+                 tags=tags,
+                 request_start_time=request_start_time,
+                 request_end_time=request_end_time,
+                 api_key=self.api_key,
+                 metadata=metadata,
+                 prompt_id=prompt_blueprint["id"],
+                 prompt_version=prompt_blueprint["version"],
+                 prompt_input_variables=input_variables,
+                 group_id=group_id,
+                 return_prompt_blueprint=True,
+                 **body,
+             )
+
+         if stream:
+             return stream_response(response, _track_request, stream_function)
+         request_log = _track_request(request_response=response.model_dump())
+         data = {
+             "request_id": request_log["request_id"],
+             "raw_response": response,
+             "prompt_blueprint": request_log["prompt_blueprint"],
+         }
+         return data
+
+
+ __version__ = "1.0.3"
+ __all__ = ["PromptLayer", "__version__"]
@@ -1,5 +1,7 @@
  from typing import Dict, List, Literal, Optional, Sequence, TypedDict, Union

+ from typing_extensions import Required
+

  class GetPromptTemplate(TypedDict, total=False):
      version: int
@@ -110,15 +112,15 @@ Message = Union[


  class CompletionPromptTemplate(TypedDict, total=False):
-     type: Literal["completion"]
+     type: Required[Literal["completion"]]
      template_format: TemplateFormat
      content: Sequence[Content]
      input_variables: List[str]


  class ChatPromptTemplate(TypedDict, total=False):
-     type: Literal["chat"]
-     messages: Sequence[Message]
+     type: Required[Literal["chat"]]
+     messages: Required[Sequence[Message]]
      functions: Sequence[Function]
      function_call: Union[Literal["auto", "none"], ChatFunctionCall]
      input_variables: List[str]
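
The hunks above tighten the template TypedDicts (in what appears to be promptlayer/types/prompt_template.py): with total=False every key is optional, and typing_extensions.Required opts individual keys back in. A small illustrative sketch of the effect under a static type checker; ChatTemplate here is a simplified stand-in, not the library's class:

from typing import Literal

from typing_extensions import Required, TypedDict


class ChatTemplate(TypedDict, total=False):  # simplified stand-in
    type: Required[Literal["chat"]]  # must be present despite total=False
    input_variables: list            # still optional


ok: ChatTemplate = {"type": "chat"}
bad: ChatTemplate = {}  # type checker error: key "type" is required
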
@@ -130,9 +132,9 @@ PromptTemplate = Union[CompletionPromptTemplate, ChatPromptTemplate]


  class Model(TypedDict, total=False):
-     provider: str
-     name: str
-     parameters: Dict[str, object]
+     provider: Required[str]
+     name: Required[str]
+     parameters: Required[Dict[str, object]]


  class Metadata(TypedDict, total=False):
@@ -154,13 +156,27 @@ class PublishPromptTemplate(BasePromptTemplate, PromptVersion, total=False):
      release_labels: Optional[List[str]] = None


- class BasePromptTemplateResponse(TypedDict):
-     id: int
-     prompt_name: str
+ class BaseProviderBaseURL(TypedDict):
+     name: Required[str]
+     provider: Required[str]
+     url: Required[str]
+
+
+ class ProviderBaseURL(BaseProviderBaseURL):
+     id: Required[int]
+
+
+ class BasePromptTemplateResponse(TypedDict, total=False):
+     id: Required[int]
+     prompt_name: Required[str]
      tags: List[str]
-     prompt_template: PromptTemplate
+     prompt_template: Required[PromptTemplate]
      commit_message: str
      metadata: Metadata
+     provider_base_url: ProviderBaseURL
+
+
+ a: BasePromptTemplateResponse = {"provider_base_url": {"url": ""}}


  class PublishPromptTemplateResponse(BasePromptTemplateResponse):
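
The new ProviderBaseURL types pair with the provider_base_url handling in run(): the URL is copied into kwargs["base_url"] and later popped by openai_request()/anthropic_request() when constructing the client. A sketch of the blueprint fragment being consumed, with hypothetical values:

provider_base_url = {
    "id": 1,                                    # hypothetical
    "name": "corporate-proxy",                  # hypothetical
    "provider": "openai",
    "url": "https://llm-proxy.example.com/v1",  # hypothetical
}
# In PromptLayer.run():
#     kwargs["base_url"] = provider_base_url["url"]
# In openai_request() / anthropic_request():
#     client = OpenAI(base_url=kwargs.pop("base_url", None))
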
@@ -8,7 +8,7 @@ import sys
  import types
  from copy import deepcopy
  from enum import Enum
- from typing import List, Union
+ from typing import Callable, Generator, List, Union

  import requests

@@ -642,3 +642,203 @@ def get_all_prompt_templates(
          raise Exception(
              f"PromptLayer had the following error while getting all your prompt templates: {e}"
          )
+
+
+ def track_request(**body):
+     try:
+         response = requests.post(
+             f"{URL_API_PROMPTLAYER}/track-request",
+             json=body,
+         )
+         if response.status_code != 200:
+             warn_on_bad_response(
+                 response,
+                 f"PromptLayer had the following error while tracking your request: {response.text}",
+             )
+         return response.json()
+     except requests.exceptions.RequestException as e:
+         print(
+             f"WARNING: While logging your request PromptLayer had the following error: {e}",
+             file=sys.stderr,
+         )
+         return {}
+
+
+ def openai_stream_chat(results: list):
+     from openai.types.chat import (
+         ChatCompletion,
+         ChatCompletionChunk,
+         ChatCompletionMessage,
+     )
+     from openai.types.chat.chat_completion import Choice
+
+     chat_completion_chunks: List[ChatCompletionChunk] = results
+     response: ChatCompletion = ChatCompletion(
+         id="",
+         object="chat.completion",
+         choices=[
+             Choice(
+                 finish_reason="stop",
+                 index=0,
+                 message=ChatCompletionMessage(role="assistant"),
+             )
+         ],
+         created=0,
+         model="",
+     )
+     last_result = chat_completion_chunks[-1]
+     response.id = last_result.id
+     response.created = last_result.created
+     response.model = last_result.model
+     response.system_fingerprint = last_result.system_fingerprint
+     response.usage = last_result.usage
+     content = ""
+     for result in chat_completion_chunks:
+         if len(result.choices) > 0 and result.choices[0].delta.content:
+             content = f"{content}{result.choices[0].delta.content}"
+     response.choices[0].message.content = content
+     return response
+
+
+ def openai_stream_completion(results: list):
+     from openai.types.completion import Completion, CompletionChoice
+
+     completions: List[Completion] = results
+     last_chunk = completions[-1]
+     response = Completion(
+         id=last_chunk.id,
+         created=last_chunk.created,
+         model=last_chunk.model,
+         object="text_completion",
+         choices=[CompletionChoice(finish_reason="stop", index=0, text="")],
+     )
+     text = ""
+     for completion in completions:
+         usage = completion.usage
+         system_fingerprint = completion.system_fingerprint
+         if len(completion.choices) > 0 and completion.choices[0].text:
+             text = f"{text}{completion.choices[0].text}"
+         if usage:
+             response.usage = usage
+         if system_fingerprint:
+             response.system_fingerprint = system_fingerprint
+     response.choices[0].text = text
+     return response
+
+
+ def anthropic_stream_message(results: list):
+     from anthropic.types import Message, MessageStreamEvent, TextBlock, Usage
+
+     message_stream_events: List[MessageStreamEvent] = results
+     response: Message = Message(
+         id="",
+         model="",
+         content=[],
+         role="assistant",
+         type="message",
+         stop_reason="stop_sequence",
+         stop_sequence=None,
+         usage=Usage(input_tokens=0, output_tokens=0),
+     )
+     content = ""
+     for result in message_stream_events:
+         if result.type == "message_start":
+             response = result.message
+         elif result.type == "content_block_delta":
+             if result.delta.type == "text_delta":
+                 content = f"{content}{result.delta.text}"
+         elif result.type == "message_delta":
+             if hasattr(result, "usage"):
+                 response.usage.output_tokens = result.usage.output_tokens
+             if hasattr(result.delta, "stop_reason"):
+                 response.stop_reason = result.delta.stop_reason
+     response.content.append(TextBlock(type="text", text=content))
+     return response
+
+
+ def anthropic_stream_completion(results: list):
+     from anthropic.types import Completion
+
+     completions: List[Completion] = results
+     last_chunk = completions[-1]
+     response = Completion(
+         id=last_chunk.id,
+         completion="",
+         model=last_chunk.model,
+         stop_reason="stop",
+         type="completion",
+     )
+
+     text = ""
+     for completion in completions:
+         text = f"{text}{completion.completion}"
+     response.completion = text
+     return response
+
+
+ def stream_response(
+     generator: Generator, after_stream: Callable, map_results: Callable
+ ):
+     data = {
+         "request_id": None,
+         "raw_response": None,
+         "prompt_blueprint": None,
+     }
+     results = []
+     for result in generator:
+         results.append(result)
+         data["raw_response"] = result
+         yield data
+     request_response = map_results(results)
+     response = after_stream(request_response=request_response.model_dump())
+     data["request_id"] = response.get("request_id")
+     data["prompt_blueprint"] = response.get("prompt_blueprint")
+     yield data
+
+
+ def openai_chat_request(client, **kwargs):
+     return client.chat.completions.create(**kwargs)
+
+
+ def openai_completions_request(client, **kwargs):
+     return client.completions.create(**kwargs)
+
+
+ MAP_TYPE_TO_OPENAI_FUNCTION = {
+     "chat": openai_chat_request,
+     "completion": openai_completions_request,
+ }
+
+
+ def openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+     from openai import OpenAI
+
+     client = OpenAI(base_url=kwargs.pop("base_url", None))
+     request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[
+         prompt_blueprint["prompt_template"]["type"]
+     ]
+     return request_to_make(client, **kwargs)
+
+
+ def anthropic_chat_request(client, **kwargs):
+     return client.messages.create(**kwargs)
+
+
+ def anthropic_completions_request(client, **kwargs):
+     return client.completions.create(**kwargs)
+
+
+ MAP_TYPE_TO_ANTHROPIC_FUNCTION = {
+     "chat": anthropic_chat_request,
+     "completion": anthropic_completions_request,
+ }
+
+
+ def anthropic_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+     from anthropic import Anthropic
+
+     client = Anthropic(base_url=kwargs.pop("base_url", None))
+     request_to_make = MAP_TYPE_TO_ANTHROPIC_FUNCTION[
+         prompt_blueprint["prompt_template"]["type"]
+     ]
+     return request_to_make(client, **kwargs)
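
To make the control flow of the new stream_response() concrete, here is a self-contained sketch in which FakeChunk and the two callbacks are hypothetical stand-ins for the real SDK objects and for run()'s _track_request closure:

from dataclasses import dataclass

from promptlayer.utils import stream_response


@dataclass
class FakeChunk:
    text: str

    def model_dump(self):
        return {"text": self.text}


def fake_map_results(results):
    # Stand-in for e.g. openai_stream_chat: folds chunks into one response.
    return FakeChunk("".join(chunk.text for chunk in results))


def fake_after_stream(request_response):
    # Stand-in for _track_request: returns what track-request would.
    return {"request_id": 42, "prompt_blueprint": request_response}


for data in stream_response(
    iter([FakeChunk("Hel"), FakeChunk("lo")]), fake_after_stream, fake_map_results
):
    print(data["request_id"], data["raw_response"])
# Intermediate yields carry only raw_response; the final yield also has
# request_id and prompt_blueprint once tracking has completed.
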
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "promptlayer"
- version = "1.0.2"
+ version = "1.0.3"
  description = "PromptLayer is a platform for prompt engineering and tracks your LLM requests."
  authors = ["Magniv <hello@magniv.io>"]
  license = "Apache-2.0"
@@ -1,49 +0,0 @@
- import os
- from typing import Literal, Union
-
- from promptlayer.groups import GroupManager
- from promptlayer.promptlayer import PromptLayerBase
- from promptlayer.templates import TemplateManager
- from promptlayer.track import TrackManager
-
-
- class PromptLayer:
-     def __init__(self, api_key: str = None):
-         if api_key is None:
-             api_key = os.environ.get("PROMPTLAYER_API_KEY")
-         if api_key is None:
-             raise ValueError(
-                 "PromptLayer API key not provided. Please set the PROMPTLAYER_API_KEY environment variable or pass the api_key parameter."
-             )
-         self.api_key = api_key
-         self.templates = TemplateManager(api_key)
-         self.group = GroupManager(api_key)
-         self.track = TrackManager(api_key)
-
-     def __getattr__(
-         self,
-         name: Union[Literal["openai"], Literal["anthropic"], Literal["prompts"]],
-     ):
-         if name == "openai":
-             import openai as openai_module
-
-             openai = PromptLayerBase(
-                 openai_module, function_name="openai", api_key=self.api_key
-             )
-             return openai
-         elif name == "anthropic":
-             import anthropic as anthropic_module
-
-             anthropic = PromptLayerBase(
-                 anthropic_module,
-                 function_name="anthropic",
-                 provider_type="anthropic",
-                 api_key=self.api_key,
-             )
-             return anthropic
-         else:
-             raise AttributeError(f"module {__name__} has no attribute {name}")
-
-
- __version__ = "1.0.2"
- __all__ = ["PromptLayer", "__version__"]