promptlayer 1.0.61__py3-none-any.whl → 1.0.63__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

promptlayer/__init__.py CHANGED
@@ -1,4 +1,4 @@
 from .promptlayer import AsyncPromptLayer, PromptLayer
 
-__version__ = "1.0.61"
+__version__ = "1.0.63"
 __all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]
@@ -27,10 +27,12 @@ from promptlayer.streaming import (
     openai_stream_completion,
 )
 from promptlayer.utils import (
+    aanthropic_bedrock_request,
     aanthropic_request,
     aazure_openai_request,
     agoogle_request,
     amistral_request,
+    anthropic_bedrock_request,
     anthropic_request,
     aopenai_request,
     avertexai_request,
@@ -92,6 +94,16 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": google_stream_completion,
         },
     },
+    "anthropic.bedrock": {
+        "chat": {
+            "function_name": "anthropic.messages.create",
+            "stream_function": anthropic_stream_message,
+        },
+        "completion": {
+            "function_name": "anthropic.completions.create",
+            "stream_function": anthropic_stream_completion,
+        },
+    },
 }
 
 
@@ -102,6 +114,7 @@ MAP_PROVIDER_TO_FUNCTION = {
     "openai": openai_request,
     "openai.azure": azure_openai_request,
     "vertexai": vertexai_request,
+    "anthropic.bedrock": anthropic_bedrock_request,
 }
 
 AMAP_PROVIDER_TO_FUNCTION_NAME = {
@@ -155,6 +168,16 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": agoogle_stream_completion,
         },
     },
+    "anthropic.bedrock": {
+        "chat": {
+            "function_name": "anthropic.messages.create",
+            "stream_function": aanthropic_stream_message,
+        },
+        "completion": {
+            "function_name": "anthropic.completions.create",
+            "stream_function": aanthropic_stream_completion,
+        },
+    },
 }
 
 
@@ -165,6 +188,7 @@ AMAP_PROVIDER_TO_FUNCTION = {
     "openai": aopenai_request,
     "openai.azure": aazure_openai_request,
     "vertexai": avertexai_request,
+    "anthropic.bedrock": aanthropic_bedrock_request,
 }
 
 
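Taken together, the __init__.py changes register Anthropic on AWS Bedrock under a new "anthropic.bedrock" key in both the sync and async dispatch tables. A minimal sketch of how such a table lookup could be driven — the surrounding run() plumbing is not part of this diff, so everything except the imported names is illustrative:

    # Illustrative sketch only; prompt_blueprint, client_kwargs and function_kwargs
    # are placeholders for whatever the caller has prepared.
    from promptlayer import MAP_PROVIDER_TO_FUNCTION

    def dispatch_request(prompt_blueprint, provider, client_kwargs, function_kwargs):
        # "anthropic.bedrock" now resolves to anthropic_bedrock_request (see utils.py below).
        request_fn = MAP_PROVIDER_TO_FUNCTION[provider]
        return request_fn(prompt_blueprint, client_kwargs, function_kwargs)
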
promptlayer/streaming/stream_processor.py CHANGED
@@ -1,10 +1,3 @@
-"""
-Stream processors for handling streaming responses
-
-This module contains the main streaming logic that processes streaming responses
-from various LLM providers and builds progressive prompt blueprints.
-"""
-
 from typing import Any, AsyncGenerator, AsyncIterable, Callable, Dict, Generator
 
 from .blueprint_builder import (
@@ -14,35 +7,41 @@ from .blueprint_builder import (
 )
 
 
-def stream_response(*, generator: Generator, after_stream: Callable, map_results: Callable, metadata: Dict):
-    """
-    Process streaming responses and build progressive prompt blueprints
+def _build_stream_blueprint(result: Any, metadata: Dict) -> Any:
+    model_info = metadata.get("model", {}) if metadata else {}
+    provider = model_info.get("provider", "")
+    model_name = model_info.get("name", "")
 
-    Supports OpenAI, Anthropic, and Google (Gemini) streaming formats, building blueprints
-    progressively as the stream progresses.
-    """
-    results = []
-    stream_blueprint = None
-    for result in generator:
-        results.append(result)
+    if provider == "openai" or provider == "openai.azure":
+        return build_prompt_blueprint_from_openai_chunk(result, metadata)
+
+    elif provider == "google" or (provider == "vertexai" and model_name.startswith("gemini")):
+        return build_prompt_blueprint_from_google_event(result, metadata)
+
+    elif provider in ["anthropic", "anthropic.bedrock"] or (provider == "vertexai" and model_name.startswith("claude")):
+        return build_prompt_blueprint_from_anthropic_event(result, metadata)
+
+    elif provider == "mistral":
+        return build_prompt_blueprint_from_openai_chunk(result.data, metadata)
+
+    return None
 
-        # Handle OpenAI streaming format - process each chunk individually
-        if hasattr(result, "choices"):
-            stream_blueprint = build_prompt_blueprint_from_openai_chunk(result, metadata)
 
-        # Handle Google streaming format (Gemini) - GenerateContentResponse objects
-        elif hasattr(result, "candidates"):
-            stream_blueprint = build_prompt_blueprint_from_google_event(result, metadata)
+def _build_stream_data(result: Any, stream_blueprint: Any, request_id: Any = None) -> Dict[str, Any]:
+    return {
+        "request_id": request_id,
+        "raw_response": result,
+        "prompt_blueprint": stream_blueprint,
+    }
 
-        # Handle Anthropic streaming format - process each event individually
-        elif hasattr(result, "type"):
-            stream_blueprint = build_prompt_blueprint_from_anthropic_event(result, metadata)
 
-        data = {
-            "request_id": None,
-            "raw_response": result,
-            "prompt_blueprint": stream_blueprint,
-        }
+def stream_response(*, generator: Generator, after_stream: Callable, map_results: Callable, metadata: Dict):
+    results = []
+    for result in generator:
+        results.append(result)
+
+        stream_blueprint = _build_stream_blueprint(result, metadata)
+        data = _build_stream_data(result, stream_blueprint)
         yield data
 
     request_response = map_results(results)
@@ -58,35 +57,13 @@ async def astream_response(
     map_results: Callable[[Any], Any],
     metadata: Dict[str, Any] = None,
 ) -> AsyncGenerator[Dict[str, Any], None]:
-    """
-    Async version of stream_response
-
-    Process streaming responses asynchronously and build progressive prompt blueprints
-    Supports OpenAI, Anthropic, and Google (Gemini) streaming formats.
-    """
     results = []
-    stream_blueprint = None
 
     async for result in generator:
         results.append(result)
 
-        # Handle OpenAI streaming format - process each chunk individually
-        if hasattr(result, "choices"):
-            stream_blueprint = build_prompt_blueprint_from_openai_chunk(result, metadata)
-
-        # Handle Google streaming format (Gemini) - GenerateContentResponse objects
-        elif hasattr(result, "candidates"):
-            stream_blueprint = build_prompt_blueprint_from_google_event(result, metadata)
-
-        # Handle Anthropic streaming format - process each event individually
-        elif hasattr(result, "type"):
-            stream_blueprint = build_prompt_blueprint_from_anthropic_event(result, metadata)
-
-        data = {
-            "request_id": None,
-            "raw_response": result,
-            "prompt_blueprint": stream_blueprint,
-        }
+        stream_blueprint = _build_stream_blueprint(result, metadata)
+        data = _build_stream_data(result, stream_blueprint)
         yield data
 
     async def async_generator_from_list(lst):
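
The stream_processor.py rewrite drops the per-chunk attribute sniffing (hasattr checks on choices/candidates/type) in favour of routing on the prompt's model metadata, and the sync and async paths now share the _build_stream_blueprint/_build_stream_data helpers. A hedged sketch of the shape each yielded item takes when consuming a streamed run through the SDK, assuming an existing prompt template and a valid API key (both placeholders here):

    from promptlayer import PromptLayer

    pl = PromptLayer(api_key="pl_...")  # placeholder key
    for item in pl.run(prompt_name="my-prompt", stream=True):
        # Each item is the dict built by _build_stream_data: the provider's raw
        # chunk plus the progressively assembled blueprint (None if the provider
        # isn't recognised by _build_stream_blueprint).
        print(item["raw_response"], item["prompt_blueprint"])
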
promptlayer/utils.py CHANGED
@@ -59,6 +59,10 @@ def _make_httpx_client():
     return httpx.AsyncClient(timeout=_get_http_timeout())
 
 
+def _make_simple_httpx_client():
+    return httpx.Client(timeout=_get_http_timeout())
+
+
 def _get_workflow_workflow_id_or_name(workflow_id_or_name, workflow_name):
     # This is backward compatibility code
     if (workflow_id_or_name := workflow_name if workflow_id_or_name is None else workflow_id_or_name) is None:
@@ -1396,7 +1400,7 @@ async def autil_log_request(api_key: str, **kwargs) -> Union[RequestLog, None]:
 def mistral_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
     from mistralai import Mistral
 
-    client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY"))
+    client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY"), client=_make_simple_httpx_client())
     if "stream" in function_kwargs and function_kwargs["stream"]:
         function_kwargs.pop("stream")
         return client.chat.stream(**function_kwargs)
@@ -1412,7 +1416,7 @@ async def amistral_request(
 ):
     from mistralai import Mistral
 
-    client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY"))
+    client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY"), async_client=_make_httpx_client())
     if "stream" in function_kwargs and function_kwargs["stream"]:
         return await client.chat.stream_async(**function_kwargs)
     return await client.chat.complete_async(**function_kwargs)
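
Both Mistral paths now hand the SDK an explicitly constructed httpx client, so request timeouts follow the library-wide _get_http_timeout() setting rather than the mistralai defaults. A standalone illustration of the same pattern; the timeout value is a placeholder, since promptlayer derives it from _get_http_timeout():

    import httpx
    from mistralai import Mistral

    timeout = httpx.Timeout(120.0)  # placeholder value
    sync_client = Mistral(api_key="...", client=httpx.Client(timeout=timeout))
    async_client = Mistral(api_key="...", async_client=httpx.AsyncClient(timeout=timeout))
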
@@ -1538,3 +1542,45 @@ async def avertexai_request(prompt_blueprint: GetPromptTemplateResponse, client_
     raise NotImplementedError(
         f"Vertex AI request for model {prompt_blueprint['metadata']['model']['name']} is not implemented yet."
     )
+
+
+def anthropic_bedrock_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
+    from anthropic import AnthropicBedrock
+
+    client = AnthropicBedrock(
+        aws_access_key=function_kwargs.pop("aws_access_key", None),
+        aws_secret_key=function_kwargs.pop("aws_secret_key", None),
+        aws_region=function_kwargs.pop("aws_region", None),
+        aws_session_token=function_kwargs.pop("aws_session_token", None),
+        base_url=function_kwargs.pop("base_url", None),
+        **client_kwargs,
+    )
+    if prompt_blueprint["prompt_template"]["type"] == "chat":
+        return anthropic_chat_request(client=client, **function_kwargs)
+    elif prompt_blueprint["prompt_template"]["type"] == "completion":
+        return anthropic_completions_request(client=client, **function_kwargs)
+    raise NotImplementedError(
+        f"Unsupported prompt template type {prompt_blueprint['prompt_template']['type']}' for Anthropic Bedrock"
+    )
+
+
+async def aanthropic_bedrock_request(
+    prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict
+):
+    from anthropic import AsyncAnthropicBedrock
+
+    client = AsyncAnthropicBedrock(
+        aws_access_key=function_kwargs.pop("aws_access_key", None),
+        aws_secret_key=function_kwargs.pop("aws_secret_key", None),
+        aws_region=function_kwargs.pop("aws_region", None),
+        aws_session_token=function_kwargs.pop("aws_session_token", None),
+        base_url=function_kwargs.pop("base_url", None),
+        **client_kwargs,
+    )
+    if prompt_blueprint["prompt_template"]["type"] == "chat":
+        return await aanthropic_chat_request(client=client, **function_kwargs)
+    elif prompt_blueprint["prompt_template"]["type"] == "completion":
+        return await aanthropic_completions_request(client=client, **function_kwargs)
+    raise NotImplementedError(
+        f"Unsupported prompt template type {prompt_blueprint['prompt_template']['type']}' for Anthropic Bedrock"
+    )
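
The new Bedrock helpers pop the AWS connection settings (aws_access_key, aws_secret_key, aws_region, aws_session_token, base_url) out of function_kwargs before constructing the client, leaving the remaining kwargs to be forwarded as the actual chat/completion arguments. A hedged sketch of a direct call; the credentials and Bedrock model id are placeholders, and normally this function is reached through MAP_PROVIDER_TO_FUNCTION rather than called directly:

    from promptlayer.utils import anthropic_bedrock_request

    prompt_blueprint = {"prompt_template": {"type": "chat"}}  # minimal stand-in
    function_kwargs = {
        "aws_access_key": "AKIA...",   # consumed by AnthropicBedrock(...)
        "aws_secret_key": "...",
        "aws_region": "us-east-1",
        # everything below is forwarded to the chat request helper
        "model": "anthropic.claude-3-5-sonnet-20240620-v1:0",  # example Bedrock model id
        "max_tokens": 256,
        "messages": [{"role": "user", "content": "Hello"}],
    }
    response = anthropic_bedrock_request(prompt_blueprint, client_kwargs={}, function_kwargs=function_kwargs)
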
promptlayer-1.0.61.dist-info/METADATA → promptlayer-1.0.63.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: promptlayer
-Version: 1.0.61
+Version: 1.0.63
 Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
 License: Apache-2.0
 Author: Magniv
promptlayer-1.0.61.dist-info/RECORD → promptlayer-1.0.63.dist-info/RECORD RENAMED
@@ -1,22 +1,22 @@
-promptlayer/__init__.py,sha256=nPvZs4XEeUdjEe3DmFZ5C-KBN4LifXxdYbK2mIextTY,140
+promptlayer/__init__.py,sha256=i_U-wg6qSSPBh8OcEf8G3SnNhnlNFx42LJZLadGC1gg,140
 promptlayer/groups/__init__.py,sha256=xhOAolLUBkr76ZHvJr29OwjCIk1V9qKQXjZCuyTJUIY,429
 promptlayer/groups/groups.py,sha256=YPROicy-TzpkrpA8vOpZS2lwvJ6VRtlbQ1S2oT1N0vM,338
 promptlayer/promptlayer.py,sha256=qaxxSvimmXgN45q-IvWsAtyzIJ-w397F97ofsH_7w00,22516
 promptlayer/promptlayer_base.py,sha256=jOgXzNZlV1LKOOsXSSAOgn8o4hXn_EV0oY9Nf3Bsu_s,6872
-promptlayer/promptlayer_mixins.py,sha256=x-HOqd7SaKnVHoGeySYKzwlPVnqpFH5oCohJQIEjLQY,12172
+promptlayer/promptlayer_mixins.py,sha256=TYxcAAosWG7wmST-TmE4ScAM9KhJy7_K8VfW4Z2Fjlk,13010
 promptlayer/span_exporter.py,sha256=Pc1-zWAcjVCSykh-4rYPqiEZvzkG9xaYLVoHFY_TWaQ,2410
 promptlayer/streaming/__init__.py,sha256=yNO77fyOi_scNPbE-eIEDGwSOyp8WYyPZ7ZrHaoipmM,1523
 promptlayer/streaming/blueprint_builder.py,sha256=NLmqwspHoAsecrY7varbF4EQaUg5yBKfBxS4y7UycuU,5925
 promptlayer/streaming/response_handlers.py,sha256=vNvpP-RLVl2uHkKLc8Ci9bmNldCezRey40tgtBEd4bo,19005
-promptlayer/streaming/stream_processor.py,sha256=wgY2B1PEJA3xWotDtJaeGS7rjhadAh7SVZ_q5QkEPbg,3752
+promptlayer/streaming/stream_processor.py,sha256=vB9pB25bd0vG-Pl2UYqB8Ae6b9iKSH-WCB9S-SNOSAU,2836
 promptlayer/templates.py,sha256=7ObDPMzHXjttDdJdCXA_pDL9XAnmcujIWucmgZJcOC8,1179
 promptlayer/track/__init__.py,sha256=tyweLTAY7UpYpBHWwY-T3pOPDIlGjcgccYXqU_r0694,1710
 promptlayer/track/track.py,sha256=A-awcYwsSwxktrlCMchy8NITIquwxU1UXbgLZMwqrA0,3164
 promptlayer/types/__init__.py,sha256=xJcvQuOk91ZBBePb40-1FDNDKYrZoH5lPE2q6_UhprM,111
 promptlayer/types/prompt_template.py,sha256=blkVBhh4u5pMhgX_Dsn78sN7Rv2Vy_zhd1-NERLXTpM,5075
 promptlayer/types/request_log.py,sha256=xU6bcxQar6GaBOJlgZTavXUV3FjE8sF_nSjPu4Ya_00,174
-promptlayer/utils.py,sha256=hUljiXqyatTvyMmKwHZQr53BsqJ7AaSwP91qXZ5Gb_g,55859
-promptlayer-1.0.61.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-promptlayer-1.0.61.dist-info/METADATA,sha256=xOL_XD0NZ9kjyr57x7YBotqkflTMLm-FR7_dM1PLNWk,4819
-promptlayer-1.0.61.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-promptlayer-1.0.61.dist-info/RECORD,,
+promptlayer/utils.py,sha256=DNhX6ydXXIBDKI9c0d_Jke2l-ZjpdvIpWqXM7ebjt20,58029
+promptlayer-1.0.63.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+promptlayer-1.0.63.dist-info/METADATA,sha256=m9PDd_ZohbPjrl2jB4yfAbg96q9IpfeVUFxaFzm8wrQ,4819
+promptlayer-1.0.63.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+promptlayer-1.0.63.dist-info/RECORD,,