promptlayer 1.0.11__py3-none-any.whl → 1.0.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of promptlayer might be problematic.
- promptlayer/__init__.py +2 -169
- promptlayer/promptlayer.py +410 -89
- promptlayer/promptlayer_base.py +176 -0
- promptlayer/span_exporter.py +77 -0
- promptlayer/utils.py +97 -64
- {promptlayer-1.0.11.dist-info → promptlayer-1.0.13.dist-info}/METADATA +3 -1
- promptlayer-1.0.13.dist-info/RECORD +16 -0
- promptlayer-1.0.11.dist-info/RECORD +0 -14
- {promptlayer-1.0.11.dist-info → promptlayer-1.0.13.dist-info}/LICENSE +0 -0
- {promptlayer-1.0.11.dist-info → promptlayer-1.0.13.dist-info}/WHEEL +0 -0
promptlayer/__init__.py
CHANGED
@@ -1,171 +1,4 @@
-import datetime
-import os
-from copy import deepcopy
-from typing import Any, Dict, List, Literal, Union
+from .promptlayer import PromptLayer
 
-from promptlayer.groups import GroupManager
-from promptlayer.promptlayer import PromptLayerBase
-from promptlayer.templates import TemplateManager
-from promptlayer.track import TrackManager
-from promptlayer.types.prompt_template import GetPromptTemplate
-from promptlayer.utils import (
-    anthropic_request,
-    anthropic_stream_completion,
-    anthropic_stream_message,
-    openai_request,
-    openai_stream_chat,
-    openai_stream_completion,
-    stream_response,
-    track_request,
-)
-
-MAP_PROVIDER_TO_FUNCTION_NAME = {
-    "openai": {
-        "chat": {
-            "function_name": "openai.chat.completions.create",
-            "stream_function": openai_stream_chat,
-        },
-        "completion": {
-            "function_name": "openai.completions.create",
-            "stream_function": openai_stream_completion,
-        },
-    },
-    "anthropic": {
-        "chat": {
-            "function_name": "anthropic.messages.create",
-            "stream_function": anthropic_stream_message,
-        },
-        "completion": {
-            "function_name": "anthropic.completions.create",
-            "stream_function": anthropic_stream_completion,
-        },
-    },
-}
-
-MAP_PROVIDER_TO_FUNCTION = {
-    "openai": openai_request,
-    "anthropic": anthropic_request,
-}
-
-
-class PromptLayer:
-    def __init__(self, api_key: str = None):
-        if api_key is None:
-            api_key = os.environ.get("PROMPTLAYER_API_KEY")
-        if api_key is None:
-            raise ValueError(
-                "PromptLayer API key not provided. Please set the PROMPTLAYER_API_KEY environment variable or pass the api_key parameter."
-            )
-        self.api_key = api_key
-        self.templates = TemplateManager(api_key)
-        self.group = GroupManager(api_key)
-        self.track = TrackManager(api_key)
-
-    def __getattr__(
-        self,
-        name: Union[Literal["openai"], Literal["anthropic"], Literal["prompts"]],
-    ):
-        if name == "openai":
-            import openai as openai_module
-
-            openai = PromptLayerBase(
-                openai_module, function_name="openai", api_key=self.api_key
-            )
-            return openai
-        elif name == "anthropic":
-            import anthropic as anthropic_module
-
-            anthropic = PromptLayerBase(
-                anthropic_module,
-                function_name="anthropic",
-                provider_type="anthropic",
-                api_key=self.api_key,
-            )
-            return anthropic
-        else:
-            raise AttributeError(f"module {__name__} has no attribute {name}")
-
-    def run(
-        self,
-        prompt_name: str,
-        prompt_version: Union[int, None] = None,
-        prompt_release_label: Union[str, None] = None,
-        input_variables: Union[Dict[str, Any], None] = None,
-        tags: Union[List[str], None] = None,
-        metadata: Union[Dict[str, str], None] = None,
-        group_id: Union[int, None] = None,
-        stream=False,
-    ):
-        template_get_params: GetPromptTemplate = {}
-        if prompt_version:
-            template_get_params["version"] = prompt_version
-        if prompt_release_label:
-            template_get_params["label"] = prompt_release_label
-        if input_variables:
-            template_get_params["input_variables"] = input_variables
-        if metadata:
-            template_get_params["metadata_filters"] = metadata
-        prompt_blueprint = self.templates.get(prompt_name, template_get_params)
-        prompt_template = prompt_blueprint["prompt_template"]
-        if not prompt_blueprint["llm_kwargs"]:
-            raise Exception(
-                f"Prompt '{prompt_name}' does not have any LLM kwargs associated with it."
-            )
-        prompt_blueprint_metadata = prompt_blueprint.get("metadata", None)
-        if prompt_blueprint_metadata is None:
-            raise Exception(
-                f"Prompt '{prompt_name}' does not have any metadata associated with it."
-            )
-        prompt_blueprint_model = prompt_blueprint_metadata.get("model", None)
-        if prompt_blueprint_model is None:
-            raise Exception(
-                f"Prompt '{prompt_name}' does not have a model parameters associated with it."
-            )
-        provider = prompt_blueprint_model["provider"]
-        request_start_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
-        kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
-        config = MAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
-        function_name = config["function_name"]
-        stream_function = config["stream_function"]
-        request_function = MAP_PROVIDER_TO_FUNCTION[provider]
-        provider_base_url = prompt_blueprint.get("provider_base_url", None)
-        if provider_base_url:
-            kwargs["base_url"] = provider_base_url["url"]
-        kwargs["stream"] = stream
-        if stream and provider == "openai":
-            kwargs["stream_options"] = {"include_usage": True}
-        response = request_function(prompt_blueprint, **kwargs)
-
-        def _track_request(**body):
-            request_end_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
-            return track_request(
-                function_name=function_name,
-                provider_type=provider,
-                args=[],
-                kwargs=kwargs,
-                tags=tags,
-                request_start_time=request_start_time,
-                request_end_time=request_end_time,
-                api_key=self.api_key,
-                metadata=metadata,
-                prompt_id=prompt_blueprint["id"],
-                prompt_version=prompt_blueprint["version"],
-                prompt_input_variables=input_variables,
-                group_id=group_id,
-                return_prompt_blueprint=True,
-                **body,
-            )
-
-        if stream:
-            return stream_response(response, _track_request, stream_function)
-        request_log = _track_request(request_response=response.model_dump())
-        data = {
-            "request_id": request_log.get("request_id", None),
-            "raw_response": response,
-            "prompt_blueprint": request_log.get("prompt_blueprint", None),
-        }
-        return data
-
-
-__version__ = "1.0.11"
+__version__ = "1.0.13"
 __all__ = ["PromptLayer", "__version__"]
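The public import path is unchanged by this refactor: `PromptLayer` is now re-exported from the new promptlayer/promptlayer.py module, so code written against 1.0.11 keeps working. A minimal sketch (the prompt name and API key are placeholders, not taken from the package):

# Sketch only: same import path in 1.0.13 as in 1.0.11, even though the
# class implementation moved to promptlayer/promptlayer.py.
from promptlayer import PromptLayer

pl = PromptLayer(api_key="pl_...")  # or set PROMPTLAYER_API_KEY in the environment
result = pl.run(
    prompt_name="my-prompt",  # placeholder prompt name
    input_variables={"topic": "observability"},
)
print(result["request_id"])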
promptlayer/promptlayer.py
CHANGED
@@ -1,96 +1,417 @@
+import asyncio
 import datetime
-import …
-import …
-
-from …
-…
+import os
+from copy import deepcopy
+from functools import wraps
+from typing import Any, Dict, List, Literal, Union
+
+from opentelemetry import trace
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.semconv.resource import ResourceAttributes
+
+from promptlayer.groups import GroupManager
+from promptlayer.promptlayer_base import PromptLayerBase
+from promptlayer.span_exporter import PromptLayerSpanExporter
+from promptlayer.templates import TemplateManager
+from promptlayer.track import TrackManager
+from promptlayer.utils import (
+    anthropic_request,
+    anthropic_stream_completion,
+    anthropic_stream_message,
+    openai_request,
+    openai_stream_chat,
+    openai_stream_completion,
+    stream_response,
+    track_request,
+)
+
+MAP_PROVIDER_TO_FUNCTION_NAME = {
+    "openai": {
+        "chat": {
+            "function_name": "openai.chat.completions.create",
+            "stream_function": openai_stream_chat,
+        },
+        "completion": {
+            "function_name": "openai.completions.create",
+            "stream_function": openai_stream_completion,
+        },
+    },
+    "anthropic": {
+        "chat": {
+            "function_name": "anthropic.messages.create",
+            "stream_function": anthropic_stream_message,
+        },
+        "completion": {
+            "function_name": "anthropic.completions.create",
+            "stream_function": anthropic_stream_completion,
+        },
+    },
+}
+
+MAP_PROVIDER_TO_FUNCTION = {
+    "openai": openai_request,
+    "anthropic": anthropic_request,
+}
+
+
+class PromptLayer:
+    def __init__(
+        self,
+        api_key: str = None,
+        enable_tracing: bool = False,
+        workspace_id: int = None,
+    ):
+        if api_key is None:
+            api_key = os.environ.get("PROMPTLAYER_API_KEY")
+
+        if api_key is None:
+            raise ValueError(
+                "PromptLayer API key not provided. "
+                "Please set the PROMPTLAYER_API_KEY environment variable or pass the api_key parameter."
             )
-…
+
+        if enable_tracing and not workspace_id:
+            raise ValueError("Please set a workspace_id to enable tracing.")
+
+        self.api_key = api_key
+        self.templates = TemplateManager(api_key)
+        self.group = GroupManager(api_key)
+        self.tracer = self._initialize_tracer(api_key, enable_tracing, workspace_id)
+        self.track = TrackManager(api_key)
+        self.workspace_id = workspace_id
+
+    def __getattr__(
+        self,
+        name: Union[Literal["openai"], Literal["anthropic"], Literal["prompts"]],
+    ):
+        if name == "openai":
+            import openai as openai_module
+
+            openai = PromptLayerBase(
+                openai_module,
+                function_name="openai",
+                api_key=self.api_key,
+                tracer=self.tracer,
             )
-…
-        if tags is not None and not isinstance(tags, list):
-            raise Exception("pl_tags must be a list of strings.")
-        return_pl_id = kwargs.pop("return_pl_id", False)
-        request_start_time = datetime.datetime.now().timestamp()
-        function_object = object.__getattribute__(self, "_obj")
-        if inspect.isclass(function_object):
-            return PromptLayerBase(
-                function_object(*args, **kwargs),
-                function_name=object.__getattribute__(self, "_function_name"),
-                provider_type=object.__getattribute__(self, "_provider_type"),
-                api_key=object.__getattribute__(self, "_api_key"),
+            return openai
+        elif name == "anthropic":
+            import anthropic as anthropic_module
+
+            anthropic = PromptLayerBase(
+                anthropic_module,
+                function_name="anthropic",
+                provider_type="anthropic",
+                api_key=self.api_key,
+                tracer=self.tracer,
             )
-…
-                tags,
-                api_key=object.__getattribute__(self, "_api_key"),
-                *args,
-                **kwargs,
+            return anthropic
+        else:
+            raise AttributeError(f"module {__name__} has no attribute {name}")
+
+    def _create_track_request_callable(
+        self, request_params, tags, input_variables, group_id, span_id
+    ):
+        def _track_request(**body):
+            track_request_kwargs = self._prepare_track_request_kwargs(
+                request_params, tags, input_variables, group_id, span_id, **body
             )
-…
+            if self.tracer:
+                with self.tracer.start_as_current_span("track_request"):
+                    return track_request(**track_request_kwargs)
+            return track_request(**track_request_kwargs)
+
+        return _track_request
+
+    def _fetch_prompt_blueprint(self, *, prompt_name, template_params):
+        if self.tracer:
+            with self.tracer.start_as_current_span("fetch_prompt_template") as span:
+                span.set_attribute("prompt_name", prompt_name)
+                span.set_attribute(
+                    "function_input",
+                    str(
+                        {"prompt_name": prompt_name, "template_params": template_params}
+                    ),
+                )
+                result = self.templates.get(prompt_name, template_params)
+                span.set_attribute("function_output", str(result))
+                return result
+        return self.templates.get(prompt_name, template_params)
+
+    @staticmethod
+    def _initialize_tracer(
+        api_key: str = None, enable_tracing: bool = False, workspace_id: int = None
+    ):
+        if enable_tracing:
+            resource = Resource(
+                attributes={ResourceAttributes.SERVICE_NAME: "prompt-layer-library"}
+            )
+            tracer_provider = TracerProvider(resource=resource)
+            promptlayer_exporter = PromptLayerSpanExporter(
+                api_key=api_key, workspace_id=workspace_id
+            )
+            span_processor = BatchSpanProcessor(promptlayer_exporter)
+            tracer_provider.add_span_processor(span_processor)
+            trace.set_tracer_provider(tracer_provider)
+            return trace.get_tracer(__name__)
+        else:
+            return None
+
+    def _make_llm_request(self, request_params):
+        span_id = None
+
+        if self.tracer:
+            with self.tracer.start_as_current_span("llm_request") as span:
+                span.set_attribute("provider", request_params["provider"])
+                span.set_attribute("function_name", request_params["function_name"])
+                span.set_attribute("function_input", str(request_params))
+                span_id = hex(span.context.span_id)[2:].zfill(16)
+                response = request_params["request_function"](
+                    request_params["prompt_blueprint"], **request_params["kwargs"]
+                )
+                span.set_attribute("function_output", str(response))
+        else:
+            response = request_params["request_function"](
+                request_params["prompt_blueprint"], **request_params["kwargs"]
+            )
+
+        return response, span_id
+
+    @staticmethod
+    def _prepare_get_prompt_template_params(
+        *, prompt_version, prompt_release_label, input_variables, metadata
+    ):
+        params = {}
+
+        if prompt_version:
+            params["version"] = prompt_version
+        if prompt_release_label:
+            params["label"] = prompt_release_label
+        if input_variables:
+            params["input_variables"] = input_variables
+        if metadata:
+            params["metadata_filters"] = metadata
+
+        return params
+
+    @staticmethod
+    def _prepare_llm_request_params(
+        *, prompt_blueprint, prompt_template, prompt_blueprint_model, stream
+    ):
+        provider = prompt_blueprint_model["provider"]
+        kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
+        config = MAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
+
+        if provider_base_url := prompt_blueprint.get("provider_base_url"):
+            kwargs["base_url"] = provider_base_url["url"]
+
+        kwargs["stream"] = stream
+        if stream and provider == "openai":
+            kwargs["stream_options"] = {"include_usage": True}
+
+        return {
+            "provider": provider,
+            "function_name": config["function_name"],
+            "stream_function": config["stream_function"],
+            "request_function": MAP_PROVIDER_TO_FUNCTION[provider],
+            "kwargs": kwargs,
+            "prompt_blueprint": prompt_blueprint,
+        }
+
+    def _prepare_track_request_kwargs(
+        self, request_params, tags, input_variables, group_id, span_id, **body
+    ):
+        return {
+            "function_name": request_params["function_name"],
+            "provider_type": request_params["provider"],
+            "args": [],
+            "kwargs": request_params["kwargs"],
+            "tags": tags,
+            "request_start_time": datetime.datetime.now(
+                datetime.timezone.utc
+            ).timestamp(),
+            "request_end_time": datetime.datetime.now(
+                datetime.timezone.utc
+            ).timestamp(),
+            "api_key": self.api_key,
+            "metadata": request_params.get("metadata"),
+            "prompt_id": request_params["prompt_blueprint"]["id"],
+            "prompt_version": request_params["prompt_blueprint"]["version"],
+            "prompt_input_variables": input_variables,
+            "group_id": group_id,
+            "return_prompt_blueprint": True,
+            "span_id": span_id,
+            **body,
+        }
+
+    def _run_internal(
+        self,
+        *,
+        prompt_name: str,
+        prompt_version: Union[int, None] = None,
+        prompt_release_label: Union[str, None] = None,
+        input_variables: Union[Dict[str, Any], None] = None,
+        tags: Union[List[str], None] = None,
+        metadata: Union[Dict[str, str], None] = None,
+        group_id: Union[int, None] = None,
+        stream: bool = False,
+    ) -> Dict[str, Any]:
+        get_prompt_template_params = self._prepare_get_prompt_template_params(
+            prompt_version=prompt_version,
+            prompt_release_label=prompt_release_label,
+            input_variables=input_variables,
+            metadata=metadata,
+        )
+        prompt_blueprint = self._fetch_prompt_blueprint(
+            prompt_name=prompt_name, template_params=get_prompt_template_params
+        )
+        prompt_blueprint_model = self._validate_and_extract_model_from_prompt_blueprint(
+            prompt_blueprint=prompt_blueprint, prompt_name=prompt_name
+        )
+        llm_request_params = self._prepare_llm_request_params(
+            prompt_blueprint=prompt_blueprint,
+            prompt_template=prompt_blueprint["prompt_template"],
+            prompt_blueprint_model=prompt_blueprint_model,
+            stream=stream,
+        )
+
+        response, span_id = self._make_llm_request(llm_request_params)
+
+        if stream:
+            return stream_response(
+                response,
+                self._create_track_request_callable(
+                    llm_request_params, tags, input_variables, group_id, span_id
+                ),
+                llm_request_params["stream_function"],
+            )
+
+        request_log = self._track_request_log(
+            llm_request_params,
             tags,
-…
-            return_pl_id=return_pl_id,
+            input_variables,
+            group_id,
+            span_id,
+            request_response=response.model_dump(),
         )
+
+        return {
+            "request_id": request_log.get("request_id", None),
+            "raw_response": response,
+            "prompt_blueprint": request_log.get("prompt_blueprint", None),
+        }
+
+    def _track_request_log(
+        self, request_params, tags, input_variables, group_id, span_id, **body
+    ):
+        track_request_kwargs = self._prepare_track_request_kwargs(
+            request_params, tags, input_variables, group_id, span_id, **body
+        )
+        if self.tracer:
+            with self.tracer.start_as_current_span("track_request") as span:
+                span.set_attribute("function_input", str(track_request_kwargs))
+                result = track_request(**track_request_kwargs)
+                span.set_attribute("function_output", str(result))
+                return result
+        return track_request(**track_request_kwargs)
+
+    @staticmethod
+    def _validate_and_extract_model_from_prompt_blueprint(
+        *, prompt_blueprint, prompt_name
+    ):
+        if not prompt_blueprint["llm_kwargs"]:
+            raise ValueError(
+                f"Prompt '{prompt_name}' does not have any LLM kwargs associated with it."
+            )
+
+        prompt_blueprint_metadata = prompt_blueprint.get("metadata")
+
+        if not prompt_blueprint_metadata:
+            raise ValueError(
+                f"Prompt '{prompt_name}' does not have any metadata associated with it."
+            )
+
+        prompt_blueprint_model = prompt_blueprint_metadata.get("model")
+
+        if not prompt_blueprint_model:
+            raise ValueError(
+                f"Prompt '{prompt_name}' does not have a model parameters associated with it."
+            )
+
+        return prompt_blueprint_model
+
+    def run(
+        self,
+        prompt_name: str,
+        prompt_version: Union[int, None] = None,
+        prompt_release_label: Union[str, None] = None,
+        input_variables: Union[Dict[str, Any], None] = None,
+        tags: Union[List[str], None] = None,
+        metadata: Union[Dict[str, str], None] = None,
+        group_id: Union[int, None] = None,
+        stream: bool = False,
+    ) -> Dict[str, Any]:
+        _run_internal_kwargs = {
+            "prompt_name": prompt_name,
+            "prompt_version": prompt_version,
+            "prompt_release_label": prompt_release_label,
+            "input_variables": input_variables,
+            "tags": tags,
+            "metadata": metadata,
+            "group_id": group_id,
+            "stream": stream,
+        }
+
+        if self.tracer:
+            with self.tracer.start_as_current_span("PromptLayer.run") as main_span:
+                main_span.set_attribute("prompt_name", prompt_name)
+                main_span.set_attribute("stream", stream)
+                main_span.set_attribute("function_input", str(_run_internal_kwargs))
+                result = self._run_internal(**_run_internal_kwargs)
+                main_span.set_attribute("function_output", str(result))
+                return result
+        else:
+            return self._run_internal(**_run_internal_kwargs)
+
+    def traceable(self, metadata=None):
+        def decorator(func):
+            @wraps(func)
+            def sync_wrapper(*args, **kwargs):
+                if self.tracer:
+                    with self.tracer.start_as_current_span(func.__name__) as span:
+                        if metadata:
+                            for key, value in metadata.items():
+                                span.set_attribute(key, value)
+
+                        span.set_attribute(
+                            "function_input", str({"args": args, "kwargs": kwargs})
+                        )
+                        result = func(*args, **kwargs)
+                        span.set_attribute("function_output", str(result))
+
+                        return result
+                else:
+                    return func(*args, **kwargs)
+
+            @wraps(func)
+            async def async_wrapper(*args, **kwargs):
+                if self.tracer:
+                    with self.tracer.start_as_current_span(func.__name__) as span:
+                        if metadata:
+                            for key, value in metadata.items():
+                                span.set_attribute(key, value)
+
+                        span.set_attribute(
+                            "function_input", str({"args": args, "kwargs": kwargs})
+                        )
+                        result = await func(*args, **kwargs)
+                        span.set_attribute("function_output", str(result))
+
+                        return result
+                else:
+                    return await func(*args, **kwargs)
+
+            return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
+
+        return decorator
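The new constructor arguments and the traceable decorator added above can be exercised roughly as follows. This is a sketch based only on the signatures in this diff; the workspace id, prompt name, and function body are placeholders, not an official example from the package.

# Sketch of the tracing surface new in 1.0.13 (all values are placeholders).
from promptlayer import PromptLayer

pl = PromptLayer(
    api_key="pl_...",
    enable_tracing=True,   # without workspace_id this raises ValueError
    workspace_id=123,      # placeholder workspace id
)

@pl.traceable(metadata={"team": "search"})   # each call becomes an OpenTelemetry span
def summarize(text: str) -> str:
    response = pl.run(prompt_name="summarize", input_variables={"text": text})
    return str(response["raw_response"])

summarize("hello world")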
promptlayer/promptlayer_base.py
ADDED
@@ -0,0 +1,176 @@
import datetime
import inspect
import re

from promptlayer.utils import async_wrapper, promptlayer_api_handler


class PromptLayerBase(object):
    __slots__ = [
        "_obj",
        "__weakref__",
        "_function_name",
        "_provider_type",
        "_api_key",
        "_tracer",
    ]

    def __init__(
        self, obj, function_name="", provider_type="openai", api_key=None, tracer=None
    ):
        object.__setattr__(self, "_obj", obj)
        object.__setattr__(self, "_function_name", function_name)
        object.__setattr__(self, "_provider_type", provider_type)
        object.__setattr__(self, "_api_key", api_key)
        object.__setattr__(self, "_tracer", tracer)

    def __getattr__(self, name):
        attr = getattr(object.__getattribute__(self, "_obj"), name)

        if (
            name != "count_tokens"  # fix for anthropic count_tokens
            and not re.match(
                r"<class 'anthropic\..*Error'>", str(attr)
            )  # fix for anthropic errors
            and not re.match(
                r"<class 'openai\..*Error'>", str(attr)
            )  # fix for openai errors
            and (
                inspect.isclass(attr)
                or inspect.isfunction(attr)
                or inspect.ismethod(attr)
                or str(type(attr))
                == "<class 'anthropic.resources.completions.Completions'>"
                or str(type(attr))
                == "<class 'anthropic.resources.completions.AsyncCompletions'>"
                or str(type(attr)) == "<class 'anthropic.resources.messages.Messages'>"
                or str(type(attr))
                == "<class 'anthropic.resources.messages.AsyncMessages'>"
                or re.match(r"<class 'openai\.resources.*'>", str(type(attr)))
            )
        ):
            return PromptLayerBase(
                attr,
                function_name=f'{object.__getattribute__(self, "_function_name")}.{name}',
                provider_type=object.__getattribute__(self, "_provider_type"),
                api_key=object.__getattribute__(self, "_api_key"),
                tracer=object.__getattribute__(self, "_tracer"),
            )
        return attr

    def __delattr__(self, name):
        delattr(object.__getattribute__(self, "_obj"), name)

    def __setattr__(self, name, value):
        setattr(object.__getattribute__(self, "_obj"), name, value)

    def __call__(self, *args, **kwargs):
        tags = kwargs.pop("pl_tags", None)
        if tags is not None and not isinstance(tags, list):
            raise Exception("pl_tags must be a list of strings.")

        return_pl_id = kwargs.pop("return_pl_id", False)
        request_start_time = datetime.datetime.now().timestamp()
        function_object = object.__getattribute__(self, "_obj")
        tracer = object.__getattribute__(self, "_tracer")
        function_name = object.__getattribute__(self, "_function_name")

        if tracer:
            with tracer.start_as_current_span(function_name) as llm_request_span:
                llm_request_span_id = hex(llm_request_span.context.span_id)[2:].zfill(
                    16
                )
                llm_request_span.set_attribute(
                    "provider", object.__getattribute__(self, "_provider_type")
                )
                llm_request_span.set_attribute("function_name", function_name)
                llm_request_span.set_attribute(
                    "function_input", str({"args": args, "kwargs": kwargs})
                )

                if inspect.isclass(function_object):
                    result = PromptLayerBase(
                        function_object(*args, **kwargs),
                        function_name=function_name,
                        provider_type=object.__getattribute__(self, "_provider_type"),
                        api_key=object.__getattribute__(self, "_api_key"),
                        tracer=tracer,
                    )
                    llm_request_span.set_attribute("function_output", str(result))
                    return result

                function_response = function_object(*args, **kwargs)

                if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(
                    function_response
                ):
                    return async_wrapper(
                        function_response,
                        return_pl_id,
                        request_start_time,
                        function_name,
                        object.__getattribute__(self, "_provider_type"),
                        tags,
                        api_key=object.__getattribute__(self, "_api_key"),
                        llm_request_span_id=llm_request_span_id,
                        tracer=tracer,  # Pass the tracer to async_wrapper
                        *args,
                        **kwargs,
                    )

                request_end_time = datetime.datetime.now().timestamp()
                result = promptlayer_api_handler(
                    function_name,
                    object.__getattribute__(self, "_provider_type"),
                    args,
                    kwargs,
                    tags,
                    function_response,
                    request_start_time,
                    request_end_time,
                    object.__getattribute__(self, "_api_key"),
                    return_pl_id=return_pl_id,
                    llm_request_span_id=llm_request_span_id,
                )
                llm_request_span.set_attribute("function_output", str(result))
                return result
        else:
            # Without tracing
            if inspect.isclass(function_object):
                return PromptLayerBase(
                    function_object(*args, **kwargs),
                    function_name=function_name,
                    provider_type=object.__getattribute__(self, "_provider_type"),
                    api_key=object.__getattribute__(self, "_api_key"),
                )

            function_response = function_object(*args, **kwargs)

            if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(
                function_response
            ):
                return async_wrapper(
                    function_response,
                    return_pl_id,
                    request_start_time,
                    function_name,
                    object.__getattribute__(self, "_provider_type"),
                    tags,
                    api_key=object.__getattribute__(self, "_api_key"),
                    *args,
                    **kwargs,
                )

            request_end_time = datetime.datetime.now().timestamp()
            return promptlayer_api_handler(
                function_name,
                object.__getattribute__(self, "_provider_type"),
                args,
                kwargs,
                tags,
                function_response,
                request_start_time,
                request_end_time,
                object.__getattribute__(self, "_api_key"),
                return_pl_id=return_pl_id,
            )
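PromptLayerBase is a transparent proxy: attribute access recursively re-wraps the provider SDK, and __call__ intercepts the final SDK call, strips the PromptLayer-specific kwargs (pl_tags, return_pl_id), and logs the request. A hedged usage sketch, not taken from the package docs (the model name and messages are placeholders):

# Sketch: pl.openai below is the openai module wrapped in PromptLayerBase,
# so the call accepts the normal OpenAI arguments plus pl_tags / return_pl_id.
from promptlayer import PromptLayer

pl = PromptLayer(api_key="pl_...")
OpenAI = pl.openai.OpenAI                 # proxied class; instantiating re-wraps the client
client = OpenAI()

completion, pl_request_id = client.chat.completions.create(
    model="gpt-4o-mini",                          # placeholder model name
    messages=[{"role": "user", "content": "hi"}],
    pl_tags=["example"],                          # consumed by PromptLayerBase.__call__
    return_pl_id=True,                            # also consumed before reaching OpenAI
)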
promptlayer/span_exporter.py
ADDED
@@ -0,0 +1,77 @@
from typing import Sequence

import requests
from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult

from promptlayer.utils import URL_API_PROMPTLAYER


class PromptLayerSpanExporter(SpanExporter):
    def __init__(self, api_key: str = None, workspace_id: int = None):
        self.api_key = api_key
        self.url = f"{URL_API_PROMPTLAYER}/spans-bulk"
        self.workspace_id = workspace_id

    def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
        request_data = []

        for span in spans:
            span_info = {
                "name": span.name,
                "context": {
                    "trace_id": hex(span.context.trace_id)[2:].zfill(
                        32
                    ),  # Ensure 32 characters
                    "span_id": hex(span.context.span_id)[2:].zfill(
                        16
                    ),  # Ensure 16 characters
                    "trace_state": str(span.context.trace_state),
                },
                "kind": str(span.kind),
                "parent_id": hex(span.parent.span_id)[2:] if span.parent else None,
                "start_time": span.start_time,
                "end_time": span.end_time,
                "status": {
                    "status_code": str(span.status.status_code),
                    "description": span.status.description,
                },
                "attributes": dict(span.attributes),
                "events": [
                    {
                        "name": event.name,
                        "timestamp": event.timestamp,
                        "attributes": dict(event.attributes),
                    }
                    for event in span.events
                ],
                "links": [
                    {"context": link.context, "attributes": dict(link.attributes)}
                    for link in span.links
                ],
                "resource": {
                    "attributes": dict(span.resource.attributes),
                    "schema_url": span.resource.schema_url,
                },
            }
            request_data.append(span_info)

        try:
            response = requests.post(
                self.url,
                headers={
                    "X-Api-Key": self.api_key,
                    "Content-Type": "application/json",
                },
                json={
                    "spans": request_data,
                    "workspace_id": self.workspace_id,
                },
            )
            response.raise_for_status()
            return SpanExportResult.SUCCESS
        except requests.RequestException:
            return SpanExportResult.FAILURE

    def shutdown(self):
        pass
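The exporter is normally wired up by PromptLayer._initialize_tracer, but it is a standard OpenTelemetry SpanExporter, so it can be attached to any tracer provider. A sketch, assuming only the import paths shown in this diff (api_key and workspace_id are placeholders):

# Sketch: manual wiring of the exporter; PromptLayer does the equivalent
# internally when enable_tracing=True.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

from promptlayer.span_exporter import PromptLayerSpanExporter

provider = TracerProvider()
provider.add_span_processor(
    BatchSpanProcessor(PromptLayerSpanExporter(api_key="pl_...", workspace_id=123))
)
trace.set_tracer_provider(provider)

with trace.get_tracer(__name__).start_as_current_span("example"):
    pass  # finished spans are batched and POSTed to the /spans-bulk endpoint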
promptlayer/utils.py
CHANGED
@@ -11,6 +11,7 @@ from enum import Enum
 from typing import Callable, Generator, List, Union
 
 import requests
+from opentelemetry import context, trace
 
 from promptlayer.types.prompt_template import (
     GetPromptTemplate,
@@ -36,6 +37,7 @@ def promptlayer_api_handler(
     request_end_time,
     api_key,
     return_pl_id=False,
+    llm_request_span_id=None,
 ):
     if (
         isinstance(response, types.GeneratorType)
@@ -49,8 +51,8 @@ def promptlayer_api_handler(
         ]
     ):
         return GeneratorProxy(
-            response,
-            {
+            generator=response,
+            api_request_arguments={
                 "function_name": function_name,
                 "provider_type": provider_type,
                 "args": args,
@@ -59,21 +61,23 @@ def promptlayer_api_handler(
                 "request_start_time": request_start_time,
                 "request_end_time": request_end_time,
                 "return_pl_id": return_pl_id,
+                "llm_request_span_id": llm_request_span_id,
             },
-            api_key,
+            api_key=api_key,
         )
     else:
         request_id = promptlayer_api_request(
-            function_name,
-            provider_type,
-            args,
-            kwargs,
-            tags,
-            response,
-            request_start_time,
-            request_end_time,
-            api_key,
+            function_name=function_name,
+            provider_type=provider_type,
+            args=args,
+            kwargs=kwargs,
+            tags=tags,
+            response=response,
+            request_start_time=request_start_time,
+            request_end_time=request_end_time,
+            api_key=api_key,
             return_pl_id=return_pl_id,
+            llm_request_span_id=llm_request_span_id,
         )
         if return_pl_id:
             return response, request_id
@@ -91,6 +95,7 @@ async def promptlayer_api_handler_async(
     request_end_time,
     api_key,
     return_pl_id=False,
+    llm_request_span_id=None,
 ):
     return await run_in_thread_async(
         None,
@@ -105,6 +110,7 @@ async def promptlayer_api_handler_async(
         request_end_time,
         api_key,
         return_pl_id=return_pl_id,
+        llm_request_span_id=llm_request_span_id,
     )
 
 
@@ -124,6 +130,7 @@ def convert_native_object_to_dict(native_object):
 
 
 def promptlayer_api_request(
+    *,
     function_name,
     provider_type,
     args,
@@ -135,6 +142,7 @@ def promptlayer_api_request(
     api_key,
     return_pl_id=False,
     metadata=None,
+    llm_request_span_id=None,
 ):
     if isinstance(response, dict) and hasattr(response, "to_dict_recursive"):
         response = response.to_dict_recursive()
@@ -157,6 +165,7 @@ def promptlayer_api_request(
             "request_end_time": request_end_time,
             "metadata": metadata,
             "api_key": api_key,
+            "span_id": llm_request_span_id,
         },
     )
     if not hasattr(request_response, "status_code"):
@@ -178,6 +187,26 @@ def promptlayer_api_request(
     return request_response.json().get("request_id")
 
 
+def track_request(**body):
+    try:
+        response = requests.post(
+            f"{URL_API_PROMPTLAYER}/track-request",
+            json=body,
+        )
+        if response.status_code != 200:
+            warn_on_bad_response(
+                response,
+                f"PromptLayer had the following error while tracking your request: {response.text}",
+            )
+        return response.json()
+    except requests.exceptions.RequestException as e:
+        print(
+            f"WARNING: While logging your request PromptLayer had the following error: {e}",
+            file=sys.stderr,
+        )
+        return {}
+
+
 def promptlayer_api_request_async(
     function_name,
     provider_type,
@@ -193,15 +222,15 @@ def promptlayer_api_request_async(
     return run_in_thread_async(
         None,
         promptlayer_api_request,
-        function_name,
-        provider_type,
-        args,
-        kwargs,
-        tags,
-        response,
-        request_start_time,
-        request_end_time,
-        api_key,
+        function_name=function_name,
+        provider_type=provider_type,
+        args=args,
+        kwargs=kwargs,
+        tags=tags,
+        response=response,
+        request_start_time=request_start_time,
+        request_end_time=request_end_time,
+        api_key=api_key,
         return_pl_id=return_pl_id,
     )
 
@@ -396,6 +425,7 @@ class GeneratorProxy:
         self.results.append(result)
         provider_type = self.api_request_arugments["provider_type"]
         end_anthropic = False
+
         if provider_type == "anthropic":
            if hasattr(result, "stop_reason"):
                end_anthropic = result.stop_reason
@@ -403,6 +433,7 @@ class GeneratorProxy:
                end_anthropic = result.message.stop_reason
            elif hasattr(result, "type") and result.type == "message_stop":
                end_anthropic = True
+
        end_openai = provider_type == "openai" and (
            result.choices[0].finish_reason == "stop"
            or result.choices[0].finish_reason == "length"
@@ -410,21 +441,27 @@ class GeneratorProxy:
 
        if end_anthropic or end_openai:
            request_id = promptlayer_api_request(
-                self.api_request_arugments["function_name"],
-                self.api_request_arugments["provider_type"],
-                self.api_request_arugments["args"],
-                self.api_request_arugments["kwargs"],
-                self.api_request_arugments["tags"],
-                self.cleaned_result(),
-                self.api_request_arugments["request_start_time"],
-                self.api_request_arugments["request_end_time"],
-                self.api_key,
+                function_name=self.api_request_arugments["function_name"],
+                provider_type=self.api_request_arugments["provider_type"],
+                args=self.api_request_arugments["args"],
+                kwargs=self.api_request_arugments["kwargs"],
+                tags=self.api_request_arugments["tags"],
+                response=self.cleaned_result(),
+                request_start_time=self.api_request_arugments["request_start_time"],
+                request_end_time=self.api_request_arugments["request_end_time"],
+                api_key=self.api_key,
                return_pl_id=self.api_request_arugments["return_pl_id"],
+                llm_request_span_id=self.api_request_arugments.get(
+                    "llm_request_span_id"
+                ),
            )
+
        if self.api_request_arugments["return_pl_id"]:
            return result, request_id
+
        if self.api_request_arugments["return_pl_id"]:
            return result, None
+
        return result
 
    def cleaned_result(self):
@@ -531,23 +568,39 @@ async def async_wrapper(
     provider_type,
     tags,
     api_key: str = None,
+    llm_request_span_id: str = None,
+    tracer=None,
     *args,
     **kwargs,
 ):
-…
+    current_context = context.get_current()
+    token = context.attach(current_context)
+
+    try:
+        response = await coroutine_obj
+        request_end_time = datetime.datetime.now().timestamp()
+        result = await promptlayer_api_handler_async(
+            function_name,
+            provider_type,
+            args,
+            kwargs,
+            tags,
+            response,
+            request_start_time,
+            request_end_time,
+            api_key,
+            return_pl_id=return_pl_id,
+            llm_request_span_id=llm_request_span_id,
+        )
+
+        if tracer:
+            current_span = trace.get_current_span()
+            if current_span:
+                current_span.set_attribute("function_output", str(result))
+
+        return result
+    finally:
+        context.detach(token)
 
 
 def promptlayer_create_group(api_key: str = None):
@@ -665,26 +718,6 @@ def get_all_prompt_templates(
     )
 
 
-def track_request(**body):
-    try:
-        response = requests.post(
-            f"{URL_API_PROMPTLAYER}/track-request",
-            json=body,
-        )
-        if response.status_code != 200:
-            warn_on_bad_response(
-                response,
-                f"PromptLayer had the following error while tracking your request: {response.text}",
-            )
-        return response.json()
-    except requests.exceptions.RequestException as e:
-        print(
-            f"WARNING: While logging your request PromptLayer had the following error: {e}",
-            file=sys.stderr,
-        )
-        return {}
-
-
 def openai_stream_chat(results: list):
     from openai.types.chat import (
         ChatCompletion,
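The practical effect of the utils.py changes is that promptlayer_api_request is now keyword-only and threads an optional span id through to the /track-request payload. A sketch of the new calling convention, assuming the signature shown above (every value here is a placeholder; real callers such as GeneratorProxy pass actual request data):

# Sketch of the keyword-only call style enforced by the new `*` parameter.
import datetime

from promptlayer.utils import promptlayer_api_request

promptlayer_api_request(
    function_name="openai.chat.completions.create",
    provider_type="openai",
    args=[],
    kwargs={"model": "gpt-4o-mini"},        # placeholder request kwargs
    tags=["example"],
    response={"choices": []},               # placeholder response payload
    request_start_time=datetime.datetime.now().timestamp(),
    request_end_time=datetime.datetime.now().timestamp(),
    api_key="pl_...",
    return_pl_id=False,
    llm_request_span_id=None,               # populated when tracing is enabled
)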
{promptlayer-1.0.11.dist-info → promptlayer-1.0.13.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: promptlayer
-Version: 1.0.11
+Version: 1.0.13
 Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
 License: Apache-2.0
 Author: Magniv
@@ -12,6 +12,8 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Requires-Dist: opentelemetry-api (>=1.26.0,<2.0.0)
+Requires-Dist: opentelemetry-sdk (>=1.26.0,<2.0.0)
 Requires-Dist: requests (>=2.31.0,<3.0.0)
 Description-Content-Type: text/markdown
 
promptlayer-1.0.13.dist-info/RECORD
ADDED
@@ -0,0 +1,16 @@
promptlayer/__init__.py,sha256=n3ZkXiXzVzdqDwSJz8GiMPtZkRoDHGzQ4IlBKuCY2h4,102
promptlayer/groups/__init__.py,sha256=-xs-2cn0nc0D_5YxZr3nC86iTdRVZmBhEpOKDJXE-sQ,224
promptlayer/groups/groups.py,sha256=yeO6T0TM3qB0ondZRiHhcH8G06YygrpFoM8b9RmoIao,165
promptlayer/promptlayer.py,sha256=Qi0-rrEsmVhT-VtBm5MtKKVI6mOpO39YvFlLppMdGP4,15519
promptlayer/promptlayer_base.py,sha256=sev-EZehRXJSZSmJtMkqmAUK1345pqbDY_lNjPP5MYA,7158
promptlayer/span_exporter.py,sha256=b8sn9bJIUbYDmme5Md3g2K3X62RGDPMDYdkk_8ZETbk,2780
promptlayer/templates.py,sha256=aY_-BCrL0AgIdYEUE28pi0AP_avTVAgwv5hgzrh75vo,717
promptlayer/track/__init__.py,sha256=VheO_Au0lffGlPKYYPQwkv8ci16wSXABCVSNRoFWu_w,945
promptlayer/track/track.py,sha256=XNEZT9yNiRBPp9vaDZo_f0dP_ldOu8q1qafpVfS5Ze8,1610
promptlayer/types/__init__.py,sha256=ulWSyCrk5hZ_PI-nKGpd6GPcRaK8lqP4wFl0LPNUYWk,61
promptlayer/types/prompt_template.py,sha256=QbxYSeIubrwp8KmDKdt9syAwzONFPh_So9yr4H73ANQ,4429
promptlayer/utils.py,sha256=-p0qapUvkZYJd_Dfat3c8LANXWU1JN0bJB91IyjB8iA,29656
promptlayer-1.0.13.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
promptlayer-1.0.13.dist-info/METADATA,sha256=fasBS56ZGwGFuoCtHb57dPiq6S1alUnptzyg0dBrE-o,4609
promptlayer-1.0.13.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
promptlayer-1.0.13.dist-info/RECORD,,
promptlayer-1.0.11.dist-info/RECORD
REMOVED
@@ -1,14 +0,0 @@
promptlayer/__init__.py,sha256=Nk_YAsoldO0cNy-KQ9rlsdzRryWr9oEJMjqAIlXiOvs,6379
promptlayer/groups/__init__.py,sha256=-xs-2cn0nc0D_5YxZr3nC86iTdRVZmBhEpOKDJXE-sQ,224
promptlayer/groups/groups.py,sha256=yeO6T0TM3qB0ondZRiHhcH8G06YygrpFoM8b9RmoIao,165
promptlayer/promptlayer.py,sha256=1q1cZOBt27Luzu3aRWcYUjQiKbmwrF9R62Uw95Ryqqc,4035
promptlayer/templates.py,sha256=aY_-BCrL0AgIdYEUE28pi0AP_avTVAgwv5hgzrh75vo,717
promptlayer/track/__init__.py,sha256=VheO_Au0lffGlPKYYPQwkv8ci16wSXABCVSNRoFWu_w,945
promptlayer/track/track.py,sha256=XNEZT9yNiRBPp9vaDZo_f0dP_ldOu8q1qafpVfS5Ze8,1610
promptlayer/types/__init__.py,sha256=ulWSyCrk5hZ_PI-nKGpd6GPcRaK8lqP4wFl0LPNUYWk,61
promptlayer/types/prompt_template.py,sha256=QbxYSeIubrwp8KmDKdt9syAwzONFPh_So9yr4H73ANQ,4429
promptlayer/utils.py,sha256=AgzU6dcqUNkek_iH5Gh8nR8YiLOMiah3ejnC9l2Y9XE,28335
promptlayer-1.0.11.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
promptlayer-1.0.11.dist-info/METADATA,sha256=SYsOwgSi9oSzv36_4s3uEHjQ9kFJ6C9SZfx9LcTF3y0,4507
promptlayer-1.0.11.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
promptlayer-1.0.11.dist-info/RECORD,,
{promptlayer-1.0.11.dist-info → promptlayer-1.0.13.dist-info}/LICENSE
File without changes

{promptlayer-1.0.11.dist-info → promptlayer-1.0.13.dist-info}/WHEEL
File without changes