promptlayer 1.0.16__py3-none-any.whl → 1.0.78__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,106 @@
1
from typing import Any, AsyncGenerator, AsyncIterable, Callable, Dict, Generator, Optional
2
+
3
+ from .blueprint_builder import (
4
+ build_prompt_blueprint_from_anthropic_event,
5
+ build_prompt_blueprint_from_bedrock_event,
6
+ build_prompt_blueprint_from_google_event,
7
+ build_prompt_blueprint_from_openai_chunk,
8
+ build_prompt_blueprint_from_openai_responses_event,
9
+ )
10
+
11
+
12
+ def _build_stream_blueprint(result: Any, metadata: Dict) -> Any:
13
+ model_info = metadata.get("model", {}) if metadata else {}
14
+ provider = model_info.get("provider", "")
15
+ model_name = model_info.get("name", "")
16
+
17
+ if provider == "openai" or provider == "openai.azure":
18
+ api_type = model_info.get("api_type", "chat-completions") if metadata else "chat-completions"
19
+ if api_type == "chat-completions":
20
+ return build_prompt_blueprint_from_openai_chunk(result, metadata)
21
+ elif api_type == "responses":
22
+ return build_prompt_blueprint_from_openai_responses_event(result, metadata)
23
+
24
+ elif provider == "google" or (provider == "vertexai" and model_name.startswith("gemini")):
25
+ return build_prompt_blueprint_from_google_event(result, metadata)
26
+
27
+ elif provider in ("anthropic", "anthropic.bedrock") or (provider == "vertexai" and model_name.startswith("claude")):
28
+ return build_prompt_blueprint_from_anthropic_event(result, metadata)
29
+
30
+ elif provider == "mistral":
31
+ return build_prompt_blueprint_from_openai_chunk(result.data, metadata)
32
+
33
+ elif provider == "amazon.bedrock":
34
+ return build_prompt_blueprint_from_bedrock_event(result, metadata)
35
+
36
+ return None
37
+
38
+
39
+ def _build_stream_data(result: Any, stream_blueprint: Any, request_id: Any = None) -> Dict[str, Any]:
40
+ return {
41
+ "request_id": request_id,
42
+ "raw_response": result,
43
+ "prompt_blueprint": stream_blueprint,
44
+ }
45
+
46
+
47
def stream_response(*, generator: Generator, after_stream: Callable, map_results: Callable, metadata: Dict):
    """Re-yield provider stream chunks, then one final enriched payload.

    Each chunk is wrapped via ``_build_stream_data`` together with a
    best-effort prompt blueprint. Once the stream is exhausted,
    ``map_results`` merges the collected chunks, ``after_stream``
    records the request, and a last payload carrying the ``request_id``
    and final ``prompt_blueprint`` is yielded.

    For amazon.bedrock, ``generator`` is the raw streaming response
    dict: its "stream" member is iterated and its ResponseMetadata is
    re-attached to the merged response before logging.
    """
    results = []
    provider = metadata.get("model", {}).get("provider", "")
    if provider == "amazon.bedrock":
        response_metadata = generator.get("ResponseMetadata", {})
        generator = generator.get("stream", generator)

    # BUG FIX: pre-build an empty payload so the final yield below cannot
    # raise NameError when the provider stream produces no chunks at all.
    data = _build_stream_data(None, None)
    for result in generator:
        results.append(result)
        stream_blueprint = _build_stream_blueprint(result, metadata)
        data = _build_stream_data(result, stream_blueprint)
        yield data

    request_response = map_results(results)
    if provider == "amazon.bedrock":
        request_response["ResponseMetadata"] = response_metadata
    else:
        # Non-bedrock merged results are pydantic models; serialize for the API.
        request_response = request_response.model_dump(mode="json")

    response = after_stream(request_response=request_response)
    data["request_id"] = response.get("request_id")
    data["prompt_blueprint"] = response.get("prompt_blueprint")
    yield data
71
+
72
+
73
async def astream_response(
    generator: AsyncIterable[Any],
    after_stream: Callable[..., Any],
    map_results: Callable[[Any], Any],
    metadata: Optional[Dict[str, Any]] = None,
) -> AsyncGenerator[Dict[str, Any], None]:
    """Async counterpart of ``stream_response``.

    Re-yields each stream chunk wrapped via ``_build_stream_data``, then
    replays the collected chunks through ``map_results`` (awaited),
    records the request with ``after_stream`` (awaited), and yields one
    final payload carrying the request_id and final prompt_blueprint.

    BUG FIX: the signature declared ``metadata: Dict[...] = None`` and
    then called ``metadata.get`` unconditionally, so omitting the
    argument raised AttributeError; the annotation is now Optional and
    the lookup is guarded. Also pre-builds the payload so the final
    yield cannot raise NameError on an empty stream.
    """
    results = []
    provider = (metadata or {}).get("model", {}).get("provider", "")
    if provider == "amazon.bedrock":
        response_metadata = generator.get("ResponseMetadata", {})
        generator = generator.get("stream", generator)

    data = _build_stream_data(None, None)
    async for result in generator:
        results.append(result)
        stream_blueprint = _build_stream_blueprint(result, metadata)
        data = _build_stream_data(result, stream_blueprint)
        yield data

    async def _replay(items):
        # map_results expects an async iterable; replay the buffered chunks.
        for item in items:
            yield item

    request_response = await map_results(_replay(results))

    if provider == "amazon.bedrock":
        request_response["ResponseMetadata"] = response_metadata
    else:
        # Non-bedrock merged results are pydantic models; serialize for the API.
        request_response = request_response.model_dump(mode="json")

    after_stream_response = await after_stream(request_response=request_response)
    data["request_id"] = after_stream_response.get("request_id")
    data["prompt_blueprint"] = after_stream_response.get("prompt_blueprint")
    yield data
promptlayer/templates.py CHANGED
@@ -2,6 +2,8 @@ from typing import Union
2
2
 
3
3
  from promptlayer.types.prompt_template import GetPromptTemplate, PublishPromptTemplate
4
4
  from promptlayer.utils import (
5
+ aget_all_prompt_templates,
6
+ aget_prompt_template,
5
7
  get_all_prompt_templates,
6
8
  get_prompt_template,
7
9
  publish_prompt_template,
@@ -9,14 +11,29 @@ from promptlayer.utils import (
9
11
 
10
12
 
11
13
class TemplateManager:
    """Synchronous facade over the prompt-template helpers.

    Stores the client configuration once and threads it through every
    call instead of requiring callers to repeat it.
    """

    def __init__(self, api_key: str, base_url: str, throw_on_error: bool):
        self.api_key = api_key
        self.base_url = base_url
        self.throw_on_error = throw_on_error

    def _client_args(self):
        # Common positional prefix expected by the utils helpers.
        return self.api_key, self.base_url, self.throw_on_error

    def get(self, prompt_name: str, params: Union[GetPromptTemplate, None] = None):
        """Fetch a single prompt template by name."""
        return get_prompt_template(*self._client_args(), prompt_name, params)

    def publish(self, body: PublishPromptTemplate):
        """Create or update a prompt template."""
        return publish_prompt_template(*self._client_args(), body)

    def all(self, page: int = 1, per_page: int = 30, label: str = None):
        """List prompt templates, optionally filtered by release label."""
        return get_all_prompt_templates(*self._client_args(), page, per_page, label)
27
+
28
+
29
class AsyncTemplateManager:
    """Async facade over the prompt-template helpers (get/list only)."""

    def __init__(self, api_key: str, base_url: str, throw_on_error: bool):
        self.api_key = api_key
        self.base_url = base_url
        self.throw_on_error = throw_on_error

    def _client_args(self):
        # Common positional prefix expected by the async utils helpers.
        return self.api_key, self.base_url, self.throw_on_error

    async def get(self, prompt_name: str, params: Union[GetPromptTemplate, None] = None):
        """Fetch a single prompt template by name."""
        return await aget_prompt_template(*self._client_args(), prompt_name, params)

    async def all(self, page: int = 1, per_page: int = 30, label: str = None):
        """List prompt templates, optionally filtered by release label."""
        return await aget_all_prompt_templates(*self._client_args(), page, per_page, label)
@@ -1,33 +1,71 @@
1
- from promptlayer.track.track import group
2
- from promptlayer.track.track import metadata as metadata_
3
- from promptlayer.track.track import prompt
4
- from promptlayer.track.track import score as score_
1
+ from promptlayer.track.track import (
2
+ agroup,
3
+ ametadata,
4
+ aprompt,
5
+ ascore,
6
+ group,
7
+ metadata as metadata_,
8
+ prompt,
9
+ score as score_,
10
+ )
11
+
12
+ # TODO(dmu) LOW: Move this code to another file
5
13
 
6
14
 
7
15
class TrackManager:
    """Synchronous facade over the request-tracking helpers.

    Holds the client configuration once so individual tracking calls do
    not have to pass api_key/base_url/throw_on_error every time.
    """

    def __init__(self, api_key: str, base_url: str, throw_on_error: bool):
        self.api_key = api_key
        self.base_url = base_url
        self.throw_on_error = throw_on_error

    def _client_args(self):
        # Common positional prefix expected by the track helpers.
        return self.api_key, self.base_url, self.throw_on_error

    def group(self, request_id, group_id):
        """Attach a request to a request group."""
        return group(*self._client_args(), request_id, group_id)

    def metadata(self, request_id, metadata):
        """Attach string key/value metadata to a request."""
        return metadata_(*self._client_args(), request_id, metadata)

    def prompt(self, request_id, prompt_name, prompt_input_variables, version=None, label=None):
        """Associate a prompt template (and its input variables) with a request."""
        return prompt(*self._client_args(), request_id, prompt_name, prompt_input_variables, version, label)

    def score(self, request_id, score, score_name=None):
        """Record a 0-100 score for a request."""
        return score_(*self._client_args(), request_id, score, score_name)
41
+
42
+
43
class AsyncTrackManager:
    """Async facade over the request-tracking helpers."""

    def __init__(self, api_key: str, base_url: str, throw_on_error: bool):
        self.api_key = api_key
        self.base_url = base_url
        self.throw_on_error = throw_on_error

    def _client_args(self):
        # Common positional prefix expected by the async track helpers.
        return self.api_key, self.base_url, self.throw_on_error

    async def group(self, request_id, group_id):
        """Attach a request to a request group."""
        return await agroup(*self._client_args(), request_id, group_id)

    async def metadata(self, request_id, metadata):
        """Attach string key/value metadata to a request."""
        return await ametadata(*self._client_args(), request_id, metadata)

    async def prompt(self, request_id, prompt_name, prompt_input_variables, version=None, label=None):
        """Associate a prompt template (and its input variables) with a request."""
        return await aprompt(*self._client_args(), request_id, prompt_name, prompt_input_variables, version, label)

    async def score(self, request_id, score, score_name=None):
        """Record a 0-100 score for a request."""
        return await ascore(*self._client_args(), request_id, score, score_name)
31
69
 
32
70
 
33
71
# BUG FIX: AsyncTrackManager is defined in this module but was missing
# from the export list (stale since the sync-only version).
__all__ = ["TrackManager", "AsyncTrackManager"]
@@ -1,4 +1,9 @@
1
+ from promptlayer import exceptions as _exceptions
1
2
  from promptlayer.utils import (
3
+ apromptlayer_track_group,
4
+ apromptlayer_track_metadata,
5
+ apromptlayer_track_prompt,
6
+ apromptlayer_track_score,
2
7
  promptlayer_track_group,
3
8
  promptlayer_track_metadata,
4
9
  promptlayer_track_prompt,
@@ -7,40 +12,96 @@ from promptlayer.utils import (
7
12
 
8
13
 
9
14
def prompt(
    api_key: str,
    base_url: str,
    throw_on_error: bool,
    request_id,
    prompt_name,
    prompt_input_variables,
    version=None,
    label=None,
):
    """Validate input variables and track a prompt template against a request.

    Raises:
        PromptLayerValidationError: if ``prompt_input_variables`` is not a dict.
    """
    if not isinstance(prompt_input_variables, dict):
        raise _exceptions.PromptLayerValidationError(
            "Please provide a dictionary of input variables.", response=None, body=None
        )
    # BUG FIX: the previous call passed `api_key` a second time between
    # `prompt_input_variables` and `version` (leftover of the old
    # trailing-api_key signature), shifting version/label out of
    # position. Argument order now matches the async twin `aprompt`.
    return promptlayer_track_prompt(
        api_key, base_url, throw_on_error, request_id, prompt_name, prompt_input_variables, version, label
    )
22
31
 
23
32
 
24
def metadata(api_key: str, base_url: str, throw_on_error: bool, request_id, metadata):
    """Validate and attach metadata (string-to-string pairs only) to a request.

    Raises:
        PromptLayerValidationError: if ``metadata`` is not a dict, or any
            key/value is not a string.
    """
    if not isinstance(metadata, dict):
        raise _exceptions.PromptLayerValidationError(
            "Please provide a dictionary of metadata.", response=None, body=None
        )
    if not all(isinstance(k, str) and isinstance(v, str) for k, v in metadata.items()):
        raise _exceptions.PromptLayerValidationError(
            "Please provide a dictionary of metadata with key value pair of strings.", response=None, body=None
        )
    return promptlayer_track_metadata(api_key, base_url, throw_on_error, request_id, metadata)
33
44
 
34
45
 
35
def score(api_key: str, base_url: str, throw_on_error: bool, request_id, score, score_name=None):
    """Validate and record an integer score (0-100) for a request.

    Raises:
        PromptLayerValidationError: on a non-int score, a non-string
            (non-None) score name, or a score outside 0-100.
    """
    if not isinstance(score, int):
        raise _exceptions.PromptLayerValidationError("Please provide a int score.", response=None, body=None)
    if score_name is not None and not isinstance(score_name, str):
        raise _exceptions.PromptLayerValidationError("Please provide a string as score name.", response=None, body=None)
    if not 0 <= score <= 100:
        raise _exceptions.PromptLayerValidationError(
            "Please provide a score between 0 and 100.", response=None, body=None
        )
    return promptlayer_track_score(api_key, base_url, throw_on_error, request_id, score, score_name)
43
56
 
44
57
 
45
def group(api_key: str, base_url: str, throw_on_error: bool, request_id, group_id):
    """Associate ``request_id`` with the request group ``group_id``."""
    return promptlayer_track_group(api_key, base_url, throw_on_error, request_id, group_id)
60
+
61
+
62
async def aprompt(
    api_key: str,
    base_url: str,
    throw_on_error: bool,
    request_id,
    prompt_name,
    prompt_input_variables,
    version=None,
    label=None,
):
    """Async: validate input variables and track a prompt against a request.

    Raises:
        PromptLayerValidationError: if ``prompt_input_variables`` is not a dict.
    """
    if not isinstance(prompt_input_variables, dict):
        raise _exceptions.PromptLayerValidationError(
            "Please provide a dictionary of input variables.", response=None, body=None
        )
    tracked = await apromptlayer_track_prompt(
        api_key, base_url, throw_on_error, request_id, prompt_name, prompt_input_variables, version, label
    )
    return tracked
79
+
80
+
81
async def ametadata(api_key: str, base_url: str, throw_on_error: bool, request_id, metadata):
    """Async: validate and attach string-to-string metadata to a request.

    Raises:
        PromptLayerValidationError: if ``metadata`` is not a dict, or any
            key/value is not a string.
    """
    if not isinstance(metadata, dict):
        raise _exceptions.PromptLayerValidationError(
            "Please provide a dictionary of metadata.", response=None, body=None
        )
    if not all(isinstance(k, str) and isinstance(v, str) for k, v in metadata.items()):
        raise _exceptions.PromptLayerValidationError(
            "Please provide a dictionary of metadata with key-value pairs of strings.", response=None, body=None
        )
    return await apromptlayer_track_metadata(api_key, base_url, throw_on_error, request_id, metadata)
92
+
93
+
94
async def ascore(api_key: str, base_url: str, throw_on_error: bool, request_id, score, score_name=None):
    """Async: validate and record an integer score (0-100) for a request.

    Raises:
        PromptLayerValidationError: on a non-int score, a non-string
            (non-None) score name, or a score outside 0-100.
    """
    if not isinstance(score, int):
        raise _exceptions.PromptLayerValidationError("Please provide an integer score.", response=None, body=None)
    if score_name is not None and not isinstance(score_name, str):
        raise _exceptions.PromptLayerValidationError("Please provide a string as score name.", response=None, body=None)
    if not 0 <= score <= 100:
        raise _exceptions.PromptLayerValidationError(
            "Please provide a score between 0 and 100.", response=None, body=None
        )
    return await apromptlayer_track_score(api_key, base_url, throw_on_error, request_id, score, score_name)
104
+
105
+
106
async def agroup(api_key: str, base_url: str, throw_on_error: bool, request_id, group_id):
    """Async: associate ``request_id`` with the request group ``group_id``."""
    return await apromptlayer_track_group(api_key, base_url, throw_on_error, request_id, group_id)
@@ -1,3 +1,4 @@
1
1
  from . import prompt_template
2
+ from .request_log import RequestLog
2
3
 
3
- __all__ = ["prompt_template"]
4
+ __all__ = ["prompt_template", "RequestLog"]
@@ -18,9 +18,40 @@ class ImageUrl(TypedDict, total=False):
18
18
  url: str
19
19
 
20
20
 
21
+ class WebAnnotation(TypedDict, total=False):
22
+ type: Literal["web_annotation"]
23
+ title: str
24
+ url: str
25
+ start_index: int
26
+ end_index: int
27
+
28
+
29
+ class FileAnnotation(TypedDict, total=False):
30
+ type: Literal["file_annotation"]
31
+ index: int
32
+ file_id: str
33
+ filename: str
34
+
35
+
21
36
  class TextContent(TypedDict, total=False):
22
37
  type: Literal["text"]
23
38
  text: str
39
+ id: Union[str, None]
40
+ annotations: Union[List[Union[WebAnnotation, FileAnnotation]], None]
41
+
42
+
43
+ class CodeContent(TypedDict, total=False):
44
+ type: Literal["code"]
45
+ code: str
46
+ id: Union[str, None]
47
+ container_id: Union[str, None]
48
+
49
+
50
+ class ThinkingContent(TypedDict, total=False):
51
+ signature: Union[str, None]
52
+ type: Literal["thinking"]
53
+ thinking: str
54
+ id: Union[str, None]
24
55
 
25
56
 
26
57
  class ImageContent(TypedDict, total=False):
@@ -28,7 +59,23 @@ class ImageContent(TypedDict, total=False):
28
59
  image_url: ImageUrl
29
60
 
30
61
 
31
- Content = Union[TextContent, ImageContent]
62
+ class Media(TypedDict, total=False):
63
+ title: str
64
+ type: str
65
+ url: str
66
+
67
+
68
+ class MediaContnt(TypedDict, total=False):
69
+ type: Literal["media"]
70
+ media: Media
71
+
72
+
73
+ class MediaVariable(TypedDict, total=False):
74
+ type: Literal["media_variable"]
75
+ name: str
76
+
77
+
78
+ Content = Union[TextContent, ThinkingContent, CodeContent, ImageContent, MediaContnt, MediaVariable]
32
79
 
33
80
 
34
81
  class Function(TypedDict, total=False):
@@ -65,6 +112,7 @@ class UserMessage(TypedDict, total=False):
65
112
 
66
113
  class ToolCall(TypedDict, total=False):
67
114
  id: str
115
+ tool_id: Union[str, None]
68
116
  type: Literal["function"]
69
117
  function: FunctionCall
70
118
 
@@ -101,6 +149,13 @@ class PlaceholderMessage(TypedDict, total=False):
101
149
  name: str
102
150
 
103
151
 
152
+ class DeveloperMessage(TypedDict, total=False):
153
+ role: Literal["developer"]
154
+ input_variables: List[str]
155
+ template_format: TemplateFormat
156
+ content: Sequence[Content]
157
+
158
+
104
159
  class ChatFunctionCall(TypedDict, total=False):
105
160
  name: str
106
161
 
@@ -119,6 +174,7 @@ Message = Union[
119
174
  FunctionMessage,
120
175
  ToolMessage,
121
176
  PlaceholderMessage,
177
+ DeveloperMessage,
122
178
  ]
123
179
 
124
180
 
@@ -157,13 +213,13 @@ class BasePromptTemplate(TypedDict, total=False):
157
213
  tags: List[str]
158
214
 
159
215
 
160
- class PromptVersion(TypedDict, total=False):
216
+ class PromptBlueprint(TypedDict, total=False):
161
217
  prompt_template: PromptTemplate
162
218
  commit_message: str
163
219
  metadata: Metadata
164
220
 
165
221
 
166
- class PublishPromptTemplate(BasePromptTemplate, PromptVersion, total=False):
222
+ class PublishPromptTemplate(BasePromptTemplate, PromptBlueprint, total=False):
167
223
  release_labels: Optional[List[str]] = None
168
224
 
169
225
 
@@ -0,0 +1,8 @@
1
+ from typing import TypedDict, Union
2
+
3
+ from .prompt_template import PromptBlueprint
4
+
5
+
6
class RequestLog(TypedDict):
    # A single logged request.
    # id: numeric identifier of the request log entry.
    # prompt_version: the prompt blueprint the request was made with,
    #   or None (presumably when the request was not template-based —
    #   TODO confirm against the API).
    id: int
    prompt_version: Union[PromptBlueprint, None]