promptlayer 1.0.35__py3-none-any.whl → 1.0.78__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,106 @@
1
+ from typing import Any, AsyncGenerator, AsyncIterable, Callable, Dict, Generator
2
+
3
+ from .blueprint_builder import (
4
+ build_prompt_blueprint_from_anthropic_event,
5
+ build_prompt_blueprint_from_bedrock_event,
6
+ build_prompt_blueprint_from_google_event,
7
+ build_prompt_blueprint_from_openai_chunk,
8
+ build_prompt_blueprint_from_openai_responses_event,
9
+ )
10
+
11
+
12
+ def _build_stream_blueprint(result: Any, metadata: Dict) -> Any:
13
+ model_info = metadata.get("model", {}) if metadata else {}
14
+ provider = model_info.get("provider", "")
15
+ model_name = model_info.get("name", "")
16
+
17
+ if provider == "openai" or provider == "openai.azure":
18
+ api_type = model_info.get("api_type", "chat-completions") if metadata else "chat-completions"
19
+ if api_type == "chat-completions":
20
+ return build_prompt_blueprint_from_openai_chunk(result, metadata)
21
+ elif api_type == "responses":
22
+ return build_prompt_blueprint_from_openai_responses_event(result, metadata)
23
+
24
+ elif provider == "google" or (provider == "vertexai" and model_name.startswith("gemini")):
25
+ return build_prompt_blueprint_from_google_event(result, metadata)
26
+
27
+ elif provider in ("anthropic", "anthropic.bedrock") or (provider == "vertexai" and model_name.startswith("claude")):
28
+ return build_prompt_blueprint_from_anthropic_event(result, metadata)
29
+
30
+ elif provider == "mistral":
31
+ return build_prompt_blueprint_from_openai_chunk(result.data, metadata)
32
+
33
+ elif provider == "amazon.bedrock":
34
+ return build_prompt_blueprint_from_bedrock_event(result, metadata)
35
+
36
+ return None
37
+
38
+
39
+ def _build_stream_data(result: Any, stream_blueprint: Any, request_id: Any = None) -> Dict[str, Any]:
40
+ return {
41
+ "request_id": request_id,
42
+ "raw_response": result,
43
+ "prompt_blueprint": stream_blueprint,
44
+ }
45
+
46
+
47
def stream_response(*, generator: Generator, after_stream: Callable, map_results: Callable, metadata: Dict):
    """Relay a provider stream chunk-by-chunk, then yield one final enriched payload.

    Yields a payload dict (see ``_build_stream_data``) for every chunk. After the
    stream is exhausted, the accumulated chunks are folded into a single response
    via ``map_results``, reported through ``after_stream``, and the last payload is
    re-yielded with ``request_id``/``prompt_blueprint`` filled in from that call.

    Fix vs. original: on an empty generator, ``data`` was never bound and the
    final enrichment raised ``NameError``; we now pre-seed a fallback payload.
    """
    results = []
    provider = metadata.get("model", {}).get("provider", "")
    response_metadata = None
    if provider == "amazon.bedrock":
        # NOTE(review): assumes a Bedrock dict envelope with "ResponseMetadata"
        # and the iterable under "stream" (converse_stream shape) — confirm.
        response_metadata = generator.get("ResponseMetadata", {})
        generator = generator.get("stream", generator)

    # Fallback payload so the final yield is well-defined even for empty streams.
    data = _build_stream_data(None, None)
    for result in generator:
        results.append(result)
        stream_blueprint = _build_stream_blueprint(result, metadata)
        data = _build_stream_data(result, stream_blueprint)
        yield data

    request_response = map_results(results)
    if provider == "amazon.bedrock":
        request_response["ResponseMetadata"] = response_metadata
    else:
        # Non-bedrock results are serialized via their pydantic-style model_dump.
        request_response = request_response.model_dump(mode="json")

    response = after_stream(request_response=request_response)
    # The last-yielded dict is mutated in place and yielded once more, enriched.
    data["request_id"] = response.get("request_id")
    data["prompt_blueprint"] = response.get("prompt_blueprint")
    yield data
71
+
72
+
73
async def astream_response(
    generator: AsyncIterable[Any],
    after_stream: Callable[..., Any],
    map_results: Callable[[Any], Any],
    metadata: Dict[str, Any] = None,
) -> AsyncGenerator[Dict[str, Any], None]:
    """Async twin of ``stream_response``: relay chunks, then yield a final enriched payload.

    Fixes vs. original: ``metadata`` defaults to ``None`` but was dereferenced
    unconditionally (``metadata.get`` → AttributeError); and on an empty stream
    ``data`` was unbound at the final yield (NameError). Both are now guarded.
    """
    results = []
    provider = (metadata or {}).get("model", {}).get("provider", "")
    response_metadata = None
    if provider == "amazon.bedrock":
        # NOTE(review): assumes a Bedrock dict envelope ("ResponseMetadata" /
        # "stream") like the sync path — confirm against the caller.
        response_metadata = generator.get("ResponseMetadata", {})
        generator = generator.get("stream", generator)

    # Fallback payload so the final yield is well-defined even for empty streams.
    data = _build_stream_data(None, None)
    async for result in generator:
        results.append(result)
        stream_blueprint = _build_stream_blueprint(result, metadata)
        data = _build_stream_data(result, stream_blueprint)
        yield data

    async def _replay(items):
        # map_results expects an async iterable; replay the buffered chunks.
        for item in items:
            yield item

    request_response = await map_results(_replay(results))

    if provider == "amazon.bedrock":
        request_response["ResponseMetadata"] = response_metadata
    else:
        # Non-bedrock results are serialized via their pydantic-style model_dump.
        request_response = request_response.model_dump(mode="json")

    after_stream_response = await after_stream(request_response=request_response)
    data["request_id"] = after_stream_response.get("request_id")
    data["prompt_blueprint"] = after_stream_response.get("prompt_blueprint")
    yield data
promptlayer/templates.py CHANGED
@@ -11,27 +11,29 @@ from promptlayer.utils import (
11
11
 
12
12
 
13
13
class TemplateManager:
    """Synchronous access to PromptLayer prompt templates (get / publish / list)."""

    def __init__(self, api_key: str, base_url: str, throw_on_error: bool):
        self.api_key = api_key
        self.base_url = base_url
        self.throw_on_error = throw_on_error

    def _client_args(self):
        # Common positional prefix expected by every promptlayer.utils call.
        return self.api_key, self.base_url, self.throw_on_error

    def get(self, prompt_name: str, params: Union[GetPromptTemplate, None] = None):
        """Fetch a single prompt template by name."""
        return get_prompt_template(*self._client_args(), prompt_name, params)

    def publish(self, body: PublishPromptTemplate):
        """Create or update a prompt template."""
        return publish_prompt_template(*self._client_args(), body)

    def all(self, page: int = 1, per_page: int = 30, label: str = None):
        """List prompt templates, paginated and optionally filtered by label."""
        return get_all_prompt_templates(*self._client_args(), page, per_page, label)
25
27
 
26
28
 
27
29
class AsyncTemplateManager:
    """Async counterpart of TemplateManager (get / list prompt templates)."""

    def __init__(self, api_key: str, base_url: str, throw_on_error: bool):
        self.api_key = api_key
        self.base_url = base_url
        self.throw_on_error = throw_on_error

    def _client_args(self):
        # Common positional prefix expected by every promptlayer.utils call.
        return self.api_key, self.base_url, self.throw_on_error

    async def get(self, prompt_name: str, params: Union[GetPromptTemplate, None] = None):
        """Fetch a single prompt template by name."""
        return await aget_prompt_template(*self._client_args(), prompt_name, params)

    async def all(self, page: int = 1, per_page: int = 30, label: str = None):
        """List prompt templates, paginated and optionally filtered by label."""
        return await aget_all_prompt_templates(*self._client_args(), page, per_page, label)
@@ -1,59 +1,71 @@
1
- from promptlayer.track.track import agroup, ametadata, aprompt, ascore, group
2
- from promptlayer.track.track import metadata as metadata_
3
- from promptlayer.track.track import prompt
4
- from promptlayer.track.track import score as score_
1
+ from promptlayer.track.track import (
2
+ agroup,
3
+ ametadata,
4
+ aprompt,
5
+ ascore,
6
+ group,
7
+ metadata as metadata_,
8
+ prompt,
9
+ score as score_,
10
+ )
11
+
12
+ # TODO(dmu) LOW: Move this code to another file
5
13
 
6
14
 
7
15
class TrackManager:
    """Synchronous helpers for annotating previously logged PromptLayer requests."""

    def __init__(self, api_key: str, base_url: str, throw_on_error: bool):
        self.api_key = api_key
        self.base_url = base_url
        self.throw_on_error = throw_on_error

    def _client_args(self):
        # Common positional prefix expected by every track.track function.
        return self.api_key, self.base_url, self.throw_on_error

    def group(self, request_id, group_id):
        """Associate a logged request with a request group."""
        return group(*self._client_args(), request_id, group_id)

    def metadata(self, request_id, metadata):
        """Attach metadata key/value pairs to a logged request."""
        return metadata_(*self._client_args(), request_id, metadata)

    def prompt(self, request_id, prompt_name, prompt_input_variables, version=None, label=None):
        """Link a logged request to the prompt template (and inputs) that produced it."""
        return prompt(*self._client_args(), request_id, prompt_name, prompt_input_variables, version, label)

    def score(self, request_id, score, score_name=None):
        """Record a score (optionally named) for a logged request."""
        return score_(*self._client_args(), request_id, score, score_name)
31
41
 
32
42
 
33
43
class AsyncTrackManager:
    """Async counterpart of TrackManager for annotating logged requests."""

    def __init__(self, api_key: str, base_url: str, throw_on_error: bool):
        self.api_key = api_key
        self.base_url = base_url
        self.throw_on_error = throw_on_error

    def _client_args(self):
        # Common positional prefix expected by every track.track coroutine.
        return self.api_key, self.base_url, self.throw_on_error

    async def group(self, request_id, group_id):
        """Associate a logged request with a request group."""
        return await agroup(*self._client_args(), request_id, group_id)

    async def metadata(self, request_id, metadata):
        """Attach metadata key/value pairs to a logged request."""
        return await ametadata(*self._client_args(), request_id, metadata)

    async def prompt(self, request_id, prompt_name, prompt_input_variables, version=None, label=None):
        """Link a logged request to the prompt template (and inputs) that produced it."""
        return await aprompt(*self._client_args(), request_id, prompt_name, prompt_input_variables, version, label)

    async def score(self, request_id, score, score_name=None):
        """Record a score (optionally named) for a logged request."""
        return await ascore(*self._client_args(), request_id, score, score_name)
57
69
 
58
70
 
59
71
# Export both manager flavors; AsyncTrackManager is defined and public but was
# previously missing, so `from promptlayer.track import *` did not expose it.
__all__ = ["TrackManager", "AsyncTrackManager"]
@@ -1,3 +1,4 @@
1
+ from promptlayer import exceptions as _exceptions
1
2
  from promptlayer.utils import (
2
3
  apromptlayer_track_group,
3
4
  apromptlayer_track_metadata,
@@ -11,80 +12,96 @@ from promptlayer.utils import (
11
12
 
12
13
 
13
14
def prompt(
    api_key: str,
    base_url: str,
    throw_on_error: bool,
    request_id,
    prompt_name,
    prompt_input_variables,
    version=None,
    label=None,
):
    """Track which prompt template (and input variables) produced a logged request.

    Raises:
        PromptLayerValidationError: if ``prompt_input_variables`` is not a dict.
    """
    if not isinstance(prompt_input_variables, dict):
        raise _exceptions.PromptLayerValidationError(
            "Please provide a dictionary of input variables.", response=None, body=None
        )
    # Fix: pass the api_key/base_url/throw_on_error prefix exactly once, matching
    # the async variant `aprompt`. The original passed `api_key` a second time
    # after `prompt_input_variables`, shifting `version`/`label` out of position.
    return promptlayer_track_prompt(
        api_key, base_url, throw_on_error, request_id, prompt_name, prompt_input_variables, version, label
    )
26
31
 
27
32
 
28
- def metadata(request_id, metadata, api_key: str = None):
33
+ def metadata(api_key: str, base_url: str, throw_on_error: bool, request_id, metadata):
29
34
  if not isinstance(metadata, dict):
30
- raise Exception("Please provide a dictionary of metadata.")
35
+ raise _exceptions.PromptLayerValidationError(
36
+ "Please provide a dictionary of metadata.", response=None, body=None
37
+ )
31
38
  for key, value in metadata.items():
32
39
  if not isinstance(key, str) or not isinstance(value, str):
33
- raise Exception(
34
- "Please provide a dictionary of metadata with key value pair of strings."
40
+ raise _exceptions.PromptLayerValidationError(
41
+ "Please provide a dictionary of metadata with key value pair of strings.", response=None, body=None
35
42
  )
36
- return promptlayer_track_metadata(request_id, metadata, api_key)
43
+ return promptlayer_track_metadata(api_key, base_url, throw_on_error, request_id, metadata)
37
44
 
38
45
 
39
- def score(request_id, score, score_name=None, api_key: str = None):
46
+ def score(api_key: str, base_url: str, throw_on_error: bool, request_id, score, score_name=None):
40
47
  if not isinstance(score, int):
41
- raise Exception("Please provide a int score.")
48
+ raise _exceptions.PromptLayerValidationError("Please provide a int score.", response=None, body=None)
42
49
  if not isinstance(score_name, str) and score_name is not None:
43
- raise Exception("Please provide a string as score name.")
50
+ raise _exceptions.PromptLayerValidationError("Please provide a string as score name.", response=None, body=None)
44
51
  if score < 0 or score > 100:
45
- raise Exception("Please provide a score between 0 and 100.")
46
- return promptlayer_track_score(request_id, score, score_name, api_key)
52
+ raise _exceptions.PromptLayerValidationError(
53
+ "Please provide a score between 0 and 100.", response=None, body=None
54
+ )
55
+ return promptlayer_track_score(api_key, base_url, throw_on_error, request_id, score, score_name)
47
56
 
48
57
 
49
- def group(request_id, group_id, api_key: str = None):
50
- return promptlayer_track_group(request_id, group_id, api_key)
58
def group(api_key: str, base_url: str, throw_on_error: bool, request_id, group_id):
    """Associate a logged request with a request group (thin pass-through)."""
    args = (api_key, base_url, throw_on_error, request_id, group_id)
    return promptlayer_track_group(*args)
51
60
 
52
61
 
53
62
async def aprompt(
    api_key: str,
    base_url: str,
    throw_on_error: bool,
    request_id,
    prompt_name,
    prompt_input_variables,
    version=None,
    label=None,
):
    """Async: track which prompt template (and inputs) produced a logged request.

    Raises:
        PromptLayerValidationError: if ``prompt_input_variables`` is not a dict.
    """
    if not isinstance(prompt_input_variables, dict):
        raise _exceptions.PromptLayerValidationError(
            "Please provide a dictionary of input variables.", response=None, body=None
        )
    args = (api_key, base_url, throw_on_error, request_id, prompt_name, prompt_input_variables, version, label)
    return await apromptlayer_track_prompt(*args)
66
79
 
67
80
 
68
- async def ametadata(request_id, metadata, api_key: str = None):
81
async def ametadata(api_key: str, base_url: str, throw_on_error: bool, request_id, metadata):
    """Async: attach string-to-string metadata to a logged request.

    Raises:
        PromptLayerValidationError: if ``metadata`` is not a dict of str -> str.
    """
    if not isinstance(metadata, dict):
        raise _exceptions.PromptLayerValidationError(
            "Please provide a dictionary of metadata.", response=None, body=None
        )
    # Short-circuits on the first non-string key or value, like the explicit loop.
    if any(not isinstance(k, str) or not isinstance(v, str) for k, v in metadata.items()):
        raise _exceptions.PromptLayerValidationError(
            "Please provide a dictionary of metadata with key-value pairs of strings.", response=None, body=None
        )
    return await apromptlayer_track_metadata(api_key, base_url, throw_on_error, request_id, metadata)
77
92
 
78
93
 
79
- async def ascore(request_id, score, score_name=None, api_key: str = None):
94
async def ascore(api_key: str, base_url: str, throw_on_error: bool, request_id, score, score_name=None):
    """Async: record a 0-100 integer score (optionally named) for a logged request.

    Raises:
        PromptLayerValidationError: if ``score`` is not an int in [0, 100] or
            ``score_name`` is neither None nor a string.
    """
    if not isinstance(score, int):
        raise _exceptions.PromptLayerValidationError("Please provide an integer score.", response=None, body=None)
    if score_name is not None and not isinstance(score_name, str):
        raise _exceptions.PromptLayerValidationError("Please provide a string as score name.", response=None, body=None)
    if not 0 <= score <= 100:
        raise _exceptions.PromptLayerValidationError(
            "Please provide a score between 0 and 100.", response=None, body=None
        )
    return await apromptlayer_track_score(api_key, base_url, throw_on_error, request_id, score, score_name)
87
104
 
88
105
 
89
- async def agroup(request_id, group_id, api_key: str = None):
90
- return await apromptlayer_track_group(request_id, group_id, api_key)
106
async def agroup(api_key: str, base_url: str, throw_on_error: bool, request_id, group_id):
    """Async: associate a logged request with a request group (thin pass-through)."""
    args = (api_key, base_url, throw_on_error, request_id, group_id)
    return await apromptlayer_track_group(*args)
@@ -18,9 +18,40 @@ class ImageUrl(TypedDict, total=False):
18
18
  url: str
19
19
 
20
20
 
21
+ class WebAnnotation(TypedDict, total=False):
22
+ type: Literal["web_annotation"]
23
+ title: str
24
+ url: str
25
+ start_index: int
26
+ end_index: int
27
+
28
+
29
+ class FileAnnotation(TypedDict, total=False):
30
+ type: Literal["file_annotation"]
31
+ index: int
32
+ file_id: str
33
+ filename: str
34
+
35
+
21
36
  class TextContent(TypedDict, total=False):
22
37
  type: Literal["text"]
23
38
  text: str
39
+ id: Union[str, None]
40
+ annotations: Union[List[Union[WebAnnotation, FileAnnotation]], None]
41
+
42
+
43
+ class CodeContent(TypedDict, total=False):
44
+ type: Literal["code"]
45
+ code: str
46
+ id: Union[str, None]
47
+ container_id: Union[str, None]
48
+
49
+
50
+ class ThinkingContent(TypedDict, total=False):
51
+ signature: Union[str, None]
52
+ type: Literal["thinking"]
53
+ thinking: str
54
+ id: Union[str, None]
24
55
 
25
56
 
26
57
  class ImageContent(TypedDict, total=False):
@@ -44,7 +75,7 @@ class MediaVariable(TypedDict, total=False):
44
75
  name: str
45
76
 
46
77
 
47
- Content = Union[TextContent, ImageContent, MediaContnt, MediaVariable]
78
+ Content = Union[TextContent, ThinkingContent, CodeContent, ImageContent, MediaContnt, MediaVariable]
48
79
 
49
80
 
50
81
  class Function(TypedDict, total=False):
@@ -81,6 +112,7 @@ class UserMessage(TypedDict, total=False):
81
112
 
82
113
  class ToolCall(TypedDict, total=False):
83
114
  id: str
115
+ tool_id: Union[str, None]
84
116
  type: Literal["function"]
85
117
  function: FunctionCall
86
118