promptlayer 1.0.2__py3-none-any.whl → 1.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of promptlayer has been flagged as potentially problematic.
- promptlayer/__init__.py +122 -2
- promptlayer/types/prompt_template.py +26 -10
- promptlayer/utils.py +201 -1
- {promptlayer-1.0.2.dist-info → promptlayer-1.0.3.dist-info}/METADATA +1 -1
- {promptlayer-1.0.2.dist-info → promptlayer-1.0.3.dist-info}/RECORD +7 -7
- {promptlayer-1.0.2.dist-info → promptlayer-1.0.3.dist-info}/LICENSE +0 -0
- {promptlayer-1.0.2.dist-info → promptlayer-1.0.3.dist-info}/WHEEL +0 -0
promptlayer/__init__.py
CHANGED
@@ -1,10 +1,51 @@
+import datetime
 import os
-from
+from copy import deepcopy
+from typing import Dict, List, Literal, Union
 
 from promptlayer.groups import GroupManager
 from promptlayer.promptlayer import PromptLayerBase
 from promptlayer.templates import TemplateManager
 from promptlayer.track import TrackManager
+from promptlayer.types.prompt_template import GetPromptTemplate
+from promptlayer.utils import (
+    anthropic_request,
+    anthropic_stream_completion,
+    anthropic_stream_message,
+    openai_request,
+    openai_stream_chat,
+    openai_stream_completion,
+    stream_response,
+    track_request,
+)
+
+MAP_PROVIDER_TO_FUNCTION_NAME = {
+    "openai": {
+        "chat": {
+            "function_name": "openai.chat.completions.create",
+            "stream_function": openai_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.completions.create",
+            "stream_function": openai_stream_completion,
+        },
+    },
+    "anthropic": {
+        "chat": {
+            "function_name": "anthropic.messages.create",
+            "stream_function": anthropic_stream_message,
+        },
+        "completion": {
+            "function_name": "anthropic.completions.create",
+            "stream_function": anthropic_stream_completion,
+        },
+    },
+}
+
+MAP_PROVIDER_TO_FUNCTION = {
+    "openai": openai_request,
+    "anthropic": anthropic_request,
+}
 
 
 class PromptLayer:
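As an aside between the two hunks: these module-level tables drive dispatch inside the new run() method shown in the next hunk. Provider plus template type select the function name to log and the stream aggregator, and provider alone selects the request function. A minimal lookup sketch, under the assumption that the tables are importable from the package root in 1.0.3 (values in comments are illustrative):

from promptlayer import MAP_PROVIDER_TO_FUNCTION, MAP_PROVIDER_TO_FUNCTION_NAME

provider = "openai"       # taken from the blueprint's metadata["model"]["provider"]
template_type = "chat"    # prompt_template["type"], either "chat" or "completion"

config = MAP_PROVIDER_TO_FUNCTION_NAME[provider][template_type]
function_name = config["function_name"]                 # "openai.chat.completions.create"
stream_function = config["stream_function"]             # openai_stream_chat
request_function = MAP_PROVIDER_TO_FUNCTION[provider]   # openai_request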
@@ -44,6 +85,85 @@ class PromptLayer:
         else:
             raise AttributeError(f"module {__name__} has no attribute {name}")
 
+    def run(
+        self,
+        prompt_name: str,
+        prompt_version: Union[int, None] = None,
+        prompt_release_label: Union[str, None] = None,
+        input_variables: Union[Dict[str, str], None] = None,
+        tags: Union[List[str], None] = None,
+        metadata: Union[Dict[str, str], None] = None,
+        group_id: Union[int, None] = None,
+        stream=False,
+    ):
+        template_get_params: GetPromptTemplate = {}
+        if prompt_version:
+            template_get_params["version"] = prompt_version
+        if prompt_release_label:
+            template_get_params["label"] = prompt_release_label
+        if input_variables:
+            template_get_params["input_variables"] = input_variables
+        prompt_blueprint = self.templates.get(prompt_name, template_get_params)
+        prompt_template = prompt_blueprint["prompt_template"]
+        if not prompt_blueprint["llm_kwargs"]:
+            raise Exception(
+                f"Prompt '{prompt_name}' does not have any LLM kwargs associated with it."
+            )
+        prompt_blueprint_metadata = prompt_blueprint.get("metadata", None)
+        if prompt_blueprint_metadata is None:
+            raise Exception(
+                f"Prompt '{prompt_name}' does not have any metadata associated with it."
+            )
+        prompt_blueprint_model = prompt_blueprint_metadata.get("model", None)
+        if prompt_blueprint_model is None:
+            raise Exception(
+                f"Prompt '{prompt_name}' does not have a model parameters associated with it."
+            )
+        provider = prompt_blueprint_model["provider"]
+        request_start_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
+        kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
+        config = MAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
+        function_name = config["function_name"]
+        stream_function = config["stream_function"]
+        request_function = MAP_PROVIDER_TO_FUNCTION[provider]
+        provider_base_url = prompt_blueprint.get("provider_base_url", None)
+        if provider_base_url:
+            kwargs["base_url"] = provider_base_url["url"]
+        kwargs["stream"] = stream
+        if stream and provider == "openai":
+            kwargs["stream_options"] = {"include_usage": True}
+        response = request_function(prompt_blueprint, **kwargs)
+
+        def _track_request(**body):
+            request_end_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
+            return track_request(
+                function_name=function_name,
+                provider_type=provider,
+                args=[],
+                kwargs=kwargs,
+                tags=tags,
+                request_start_time=request_start_time,
+                request_end_time=request_end_time,
+                api_key=self.api_key,
+                metadata=metadata,
+                prompt_id=prompt_blueprint["id"],
+                prompt_version=prompt_blueprint["version"],
+                prompt_input_variables=input_variables,
+                group_id=group_id,
+                return_prompt_blueprint=True,
+                **body,
+            )
+
+        if stream:
+            return stream_response(response, _track_request, stream_function)
+        request_log = _track_request(request_response=response.model_dump())
+        data = {
+            "request_id": request_log["request_id"],
+            "raw_response": response,
+            "prompt_blueprint": request_log["prompt_blueprint"],
+        }
+        return data
+
 
-__version__ = "1.0.2"
+__version__ = "1.0.3"
 __all__ = ["PromptLayer", "__version__"]
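Putting the hunk above together: run() fetches the prompt blueprint, dispatches to the provider, and logs the call through track_request, returning the request id, the raw provider response, and the tracked prompt blueprint. A minimal, hypothetical usage sketch (assumes a valid PromptLayer API key in the environment and an existing prompt named "my-prompt" with model parameters configured; the prompt name and variables are placeholders):

import os

from promptlayer import PromptLayer

promptlayer_client = PromptLayer(api_key=os.environ.get("PROMPTLAYER_API_KEY"))

result = promptlayer_client.run(
    prompt_name="my-prompt",                      # placeholder prompt name
    input_variables={"topic": "observability"},   # placeholder template variables
)
print(result["request_id"])        # PromptLayer request id returned by track_request
print(result["prompt_blueprint"])  # blueprint echoed back by track_request
print(result["raw_response"])      # the raw OpenAI/Anthropic response object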
promptlayer/types/prompt_template.py
CHANGED
@@ -1,5 +1,7 @@
 from typing import Dict, List, Literal, Optional, Sequence, TypedDict, Union
 
+from typing_extensions import Required
+
 
 class GetPromptTemplate(TypedDict, total=False):
     version: int
@@ -110,15 +112,15 @@ Message = Union[
 
 
 class CompletionPromptTemplate(TypedDict, total=False):
-    type: Literal["completion"]
+    type: Required[Literal["completion"]]
     template_format: TemplateFormat
     content: Sequence[Content]
     input_variables: List[str]
 
 
 class ChatPromptTemplate(TypedDict, total=False):
-    type: Literal["chat"]
-    messages: Sequence[Message]
+    type: Required[Literal["chat"]]
+    messages: Required[Sequence[Message]]
     functions: Sequence[Function]
     function_call: Union[Literal["auto", "none"], ChatFunctionCall]
     input_variables: List[str]
@@ -130,9 +132,9 @@ PromptTemplate = Union[CompletionPromptTemplate, ChatPromptTemplate]
 
 
 class Model(TypedDict, total=False):
-    provider: str
-    name: str
-    parameters: Dict[str, object]
+    provider: Required[str]
+    name: Required[str]
+    parameters: Required[Dict[str, object]]
 
 
 class Metadata(TypedDict, total=False):
@@ -154,13 +156,27 @@ class PublishPromptTemplate(BasePromptTemplate, PromptVersion, total=False):
     release_labels: Optional[List[str]] = None
 
 
-class
-
-
+class BaseProviderBaseURL(TypedDict):
+    name: Required[str]
+    provider: Required[str]
+    url: Required[str]
+
+
+class ProviderBaseURL(BaseProviderBaseURL):
+    id: Required[int]
+
+
+class BasePromptTemplateResponse(TypedDict, total=False):
+    id: Required[int]
+    prompt_name: Required[str]
     tags: List[str]
-    prompt_template: PromptTemplate
+    prompt_template: Required[PromptTemplate]
     commit_message: str
     metadata: Metadata
+    provider_base_url: ProviderBaseURL
+
+
+a: BasePromptTemplateResponse = {"provider_base_url": {"url": ""}}
 
 
 class PublishPromptTemplateResponse(BasePromptTemplateResponse):
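The substance of this change is the Required marker: in a TypedDict declared with total=False every key is optional by default, and typing_extensions.Required opts individual keys back in, so type checkers now insist on keys such as type, messages, and provider. A small illustrative sketch, not taken from the package:

from typing import List, Literal, TypedDict

from typing_extensions import Required


class ChatTemplateSketch(TypedDict, total=False):
    type: Required[Literal["chat"]]  # must be present even though total=False
    input_variables: List[str]       # still optional


ok: ChatTemplateSketch = {"type": "chat"}  # accepted: every Required key is present
# bad: ChatTemplateSketch = {}             # a type checker reports the missing "type" key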
promptlayer/utils.py
CHANGED
@@ -8,7 +8,7 @@ import sys
 import types
 from copy import deepcopy
 from enum import Enum
-from typing import List, Union
+from typing import Callable, Generator, List, Union
 
 import requests
 
@@ -642,3 +642,203 @@ def get_all_prompt_templates(
         raise Exception(
             f"PromptLayer had the following error while getting all your prompt templates: {e}"
         )
+
+
+def track_request(**body):
+    try:
+        response = requests.post(
+            f"{URL_API_PROMPTLAYER}/track-request",
+            json=body,
+        )
+        if response.status_code != 200:
+            warn_on_bad_response(
+                response,
+                f"PromptLayer had the following error while tracking your request: {response.text}",
+            )
+        return response.json()
+    except requests.exceptions.RequestException as e:
+        print(
+            f"WARNING: While logging your request PromptLayer had the following error: {e}",
+            file=sys.stderr,
+        )
+        return {}
+
+
+def openai_stream_chat(results: list):
+    from openai.types.chat import (
+        ChatCompletion,
+        ChatCompletionChunk,
+        ChatCompletionMessage,
+    )
+    from openai.types.chat.chat_completion import Choice
+
+    chat_completion_chunks: List[ChatCompletionChunk] = results
+    response: ChatCompletion = ChatCompletion(
+        id="",
+        object="chat.completion",
+        choices=[
+            Choice(
+                finish_reason="stop",
+                index=0,
+                message=ChatCompletionMessage(role="assistant"),
+            )
+        ],
+        created=0,
+        model="",
+    )
+    last_result = chat_completion_chunks[-1]
+    response.id = last_result.id
+    response.created = last_result.created
+    response.model = last_result.model
+    response.system_fingerprint = last_result.system_fingerprint
+    response.usage = last_result.usage
+    content = ""
+    for result in chat_completion_chunks:
+        if len(result.choices) > 0 and result.choices[0].delta.content:
+            content = f"{content}{result.choices[0].delta.content}"
+    response.choices[0].message.content = content
+    return response
+
+
+def openai_stream_completion(results: list):
+    from openai.types.completion import Completion, CompletionChoice
+
+    completions: List[Completion] = results
+    last_chunk = completions[-1]
+    response = Completion(
+        id=last_chunk.id,
+        created=last_chunk.created,
+        model=last_chunk.model,
+        object="text_completion",
+        choices=[CompletionChoice(finish_reason="stop", index=0, text="")],
+    )
+    text = ""
+    for completion in completions:
+        usage = completion.usage
+        system_fingerprint = completion.system_fingerprint
+        if len(completion.choices) > 0 and completion.choices[0].text:
+            text = f"{text}{completion.choices[0].text}"
+        if usage:
+            response.usage = usage
+        if system_fingerprint:
+            response.system_fingerprint = system_fingerprint
+    response.choices[0].text = text
+    return response
+
+
+def anthropic_stream_message(results: list):
+    from anthropic.types import Message, MessageStreamEvent, TextBlock, Usage
+
+    message_stream_events: List[MessageStreamEvent] = results
+    response: Message = Message(
+        id="",
+        model="",
+        content=[],
+        role="assistant",
+        type="message",
+        stop_reason="stop_sequence",
+        stop_sequence=None,
+        usage=Usage(input_tokens=0, output_tokens=0),
+    )
+    content = ""
+    for result in message_stream_events:
+        if result.type == "message_start":
+            response = result.message
+        elif result.type == "content_block_delta":
+            if result.delta.type == "text_delta":
+                content = f"{content}{result.delta.text}"
+        elif result.type == "message_delta":
+            if hasattr(result, "usage"):
+                response.usage.output_tokens = result.usage.output_tokens
+            if hasattr(result.delta, "stop_reason"):
+                response.stop_reason = result.delta.stop_reason
+    response.content.append(TextBlock(type="text", text=content))
+    return response
+
+
+def anthropic_stream_completion(results: list):
+    from anthropic.types import Completion
+
+    completions: List[Completion] = results
+    last_chunk = completions[-1]
+    response = Completion(
+        id=last_chunk.id,
+        completion="",
+        model=last_chunk.model,
+        stop_reason="stop",
+        type="completion",
+    )
+
+    text = ""
+    for completion in completions:
+        text = f"{text}{completion.completion}"
+    response.completion = text
+    return response
+
+
+def stream_response(
+    generator: Generator, after_stream: Callable, map_results: Callable
+):
+    data = {
+        "request_id": None,
+        "raw_response": None,
+        "prompt_blueprint": None,
+    }
+    results = []
+    for result in generator:
+        results.append(result)
+        data["raw_response"] = result
+        yield data
+    request_response = map_results(results)
+    response = after_stream(request_response=request_response.model_dump())
+    data["request_id"] = response.get("request_id")
+    data["prompt_blueprint"] = response.get("prompt_blueprint")
+    yield data
+
+
+def openai_chat_request(client, **kwargs):
+    return client.chat.completions.create(**kwargs)
+
+
+def openai_completions_request(client, **kwargs):
+    return client.completions.create(**kwargs)
+
+
+MAP_TYPE_TO_OPENAI_FUNCTION = {
+    "chat": openai_chat_request,
+    "completion": openai_completions_request,
+}
+
+
+def openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+    from openai import OpenAI
+
+    client = OpenAI(base_url=kwargs.pop("base_url", None))
+    request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[
+        prompt_blueprint["prompt_template"]["type"]
+    ]
+    return request_to_make(client, **kwargs)
+
+
+def anthropic_chat_request(client, **kwargs):
+    return client.messages.create(**kwargs)
+
+
+def anthropic_completions_request(client, **kwargs):
+    return client.completions.create(**kwargs)
+
+
+MAP_TYPE_TO_ANTHROPIC_FUNCTION = {
+    "chat": anthropic_chat_request,
+    "completion": anthropic_completions_request,
+}
+
+
+def anthropic_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+    from anthropic import Anthropic
+
+    client = Anthropic(base_url=kwargs.pop("base_url", None))
+    request_to_make = MAP_TYPE_TO_ANTHROPIC_FUNCTION[
+        prompt_blueprint["prompt_template"]["type"]
+    ]
+    return request_to_make(client, **kwargs)
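stream_response above wraps the provider's chunk generator: every yield carries the latest chunk in raw_response, and only after the generator is exhausted does it aggregate the chunks and call the tracking callback, so request_id and prompt_blueprint are populated on the final yield alone. A hypothetical consumption sketch, reusing the client and prompt placeholders from the __init__.py example:

stream = promptlayer_client.run(prompt_name="my-prompt", stream=True)

last = None
for chunk in stream:
    last = chunk
    # chunk["raw_response"] holds the most recent provider chunk;
    # chunk["request_id"] and chunk["prompt_blueprint"] remain None until the end.
print(last["request_id"], last["prompt_blueprint"])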
{promptlayer-1.0.2.dist-info → promptlayer-1.0.3.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-promptlayer/__init__.py,sha256=
+promptlayer/__init__.py,sha256=EnsNOWnboXXlbMBOi06-4JIg__9v9Z6uyzIwjoZOGus,6269
 promptlayer/groups/__init__.py,sha256=-xs-2cn0nc0D_5YxZr3nC86iTdRVZmBhEpOKDJXE-sQ,224
 promptlayer/groups/groups.py,sha256=yeO6T0TM3qB0ondZRiHhcH8G06YygrpFoM8b9RmoIao,165
 promptlayer/promptlayer.py,sha256=1q1cZOBt27Luzu3aRWcYUjQiKbmwrF9R62Uw95Ryqqc,4035
@@ -6,9 +6,9 @@ promptlayer/templates.py,sha256=aY_-BCrL0AgIdYEUE28pi0AP_avTVAgwv5hgzrh75vo,717
 promptlayer/track/__init__.py,sha256=VheO_Au0lffGlPKYYPQwkv8ci16wSXABCVSNRoFWu_w,945
 promptlayer/track/track.py,sha256=XNEZT9yNiRBPp9vaDZo_f0dP_ldOu8q1qafpVfS5Ze8,1610
 promptlayer/types/__init__.py,sha256=ulWSyCrk5hZ_PI-nKGpd6GPcRaK8lqP4wFl0LPNUYWk,61
-promptlayer/types/prompt_template.py,sha256=
-promptlayer/utils.py,sha256=
-promptlayer-1.0.
-promptlayer-1.0.
-promptlayer-1.0.
-promptlayer-1.0.
+promptlayer/types/prompt_template.py,sha256=a_NzJpRW8UjSmVWNzTET0bnEAxmIYVMWWqfmGngfr68,4247
+promptlayer/utils.py,sha256=pVRLCAIikoCn1p95otxZYn0xMFN-cnDBJyN1fbT58tE,27305
+promptlayer-1.0.3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+promptlayer-1.0.3.dist-info/METADATA,sha256=C2fcG0_cj6U4QRsIFzfSOL8igkPIHAnn7QDnQ7hwZB8,4628
+promptlayer-1.0.3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+promptlayer-1.0.3.dist-info/RECORD,,
{promptlayer-1.0.2.dist-info → promptlayer-1.0.3.dist-info}/LICENSE
File without changes
{promptlayer-1.0.2.dist-info → promptlayer-1.0.3.dist-info}/WHEEL
File without changes