promptlayer 1.0.64.tar.gz → 1.0.66.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of promptlayer might be problematic.

Files changed (22)
  1. {promptlayer-1.0.64 → promptlayer-1.0.66}/PKG-INFO +1 -1
  2. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/__init__.py +1 -1
  3. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/promptlayer.py +14 -4
  4. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/promptlayer_mixins.py +26 -0
  5. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/streaming/__init__.py +4 -0
  6. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/streaming/blueprint_builder.py +35 -0
  7. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/streaming/response_handlers.py +145 -0
  8. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/streaming/stream_processor.py +27 -3
  9. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/utils.py +61 -2
  10. {promptlayer-1.0.64 → promptlayer-1.0.66}/pyproject.toml +3 -1
  11. {promptlayer-1.0.64 → promptlayer-1.0.66}/LICENSE +0 -0
  12. {promptlayer-1.0.64 → promptlayer-1.0.66}/README.md +0 -0
  13. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/groups/__init__.py +0 -0
  14. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/groups/groups.py +0 -0
  15. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/promptlayer_base.py +0 -0
  16. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/span_exporter.py +0 -0
  17. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/templates.py +0 -0
  18. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/track/__init__.py +0 -0
  19. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/track/track.py +0 -0
  20. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/types/__init__.py +0 -0
  21. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/types/prompt_template.py +0 -0
  22. {promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/types/request_log.py +0 -0
{promptlayer-1.0.64 → promptlayer-1.0.66}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: promptlayer
-Version: 1.0.64
+Version: 1.0.66
 Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
 License: Apache-2.0
 Author: Magniv
{promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/__init__.py
@@ -1,4 +1,4 @@
 from .promptlayer import AsyncPromptLayer, PromptLayer
 
-__version__ = "1.0.64"
+__version__ = "1.0.66"
 __all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]
{promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/promptlayer.py
@@ -171,6 +171,11 @@ class PromptLayer(PromptLayerMixin):
             metadata=llm_data["prompt_blueprint"]["metadata"],
         )
 
+        if isinstance(response, dict):
+            request_response = response
+        else:
+            request_response = response.model_dump(mode="json")
+
         request_log = self._track_request_log(
             llm_data,
             tags,
@@ -178,7 +183,7 @@ class PromptLayer(PromptLayerMixin):
             group_id,
             pl_run_span_id,
             metadata=metadata,
-            request_response=response.model_dump(mode="json"),
+            request_response=request_response,
         )
 
         return {
@@ -592,6 +597,11 @@ class AsyncPromptLayer(PromptLayerMixin):
             function_kwargs=llm_data["function_kwargs"],
         )
 
+        if isinstance(response, dict):
+            request_response = response
+        else:
+            request_response = response.model_dump(mode="json")
+
         if stream:
             track_request_callable = await self._create_track_request_callable(
                 request_params=llm_data,
@@ -601,7 +611,7 @@ class AsyncPromptLayer(PromptLayerMixin):
                 pl_run_span_id=pl_run_span_id,
             )
             return astream_response(
-                response,
+                request_response,
                 track_request_callable,
                 llm_data["stream_function"],
                 llm_data["prompt_blueprint"]["metadata"],
@@ -614,11 +624,11 @@ class AsyncPromptLayer(PromptLayerMixin):
             group_id,
             pl_run_span_id,
             metadata=metadata,
-            request_response=response.model_dump(mode="json"),
+            request_response=request_response,
         )
 
         return {
             "request_id": request_log.get("request_id", None),
-            "raw_response": response,
+            "raw_response": request_response,
             "prompt_blueprint": request_log.get("prompt_blueprint", None),
         }
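Both run paths above now normalize the provider response before logging: boto3's Bedrock Converse API returns a plain dict, while the other SDKs return pydantic objects. A minimal sketch of that normalization, using a hypothetical helper name (the released code inlines the branch rather than factoring it out):

from typing import Any

def normalize_response(response: Any) -> dict:
    # Plain dicts (e.g. bedrock-runtime Converse output) pass through unchanged;
    # pydantic SDK objects are dumped to a JSON-safe dict for request logging.
    if isinstance(response, dict):
        return response
    return response.model_dump(mode="json")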
{promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/promptlayer_mixins.py
@@ -13,6 +13,7 @@ from promptlayer.span_exporter import PromptLayerSpanExporter
 from promptlayer.streaming import (
     aanthropic_stream_completion,
     aanthropic_stream_message,
+    abedrock_stream_message,
     agoogle_stream_chat,
     agoogle_stream_completion,
     amistral_stream_chat,
@@ -20,6 +21,7 @@ from promptlayer.streaming import (
     anthropic_stream_message,
     aopenai_stream_chat,
     aopenai_stream_completion,
+    bedrock_stream_message,
     google_stream_chat,
     google_stream_completion,
     mistral_stream_chat,
@@ -27,10 +29,12 @@ from promptlayer.streaming import (
     openai_stream_completion,
 )
 from promptlayer.utils import (
+    aamazon_bedrock_request,
     aanthropic_bedrock_request,
     aanthropic_request,
     aazure_openai_request,
     agoogle_request,
+    amazon_bedrock_request,
     amistral_request,
     anthropic_bedrock_request,
     anthropic_request,
@@ -94,6 +98,16 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": google_stream_completion,
         },
     },
+    "amazon.bedrock": {
+        "chat": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": bedrock_stream_message,
+        },
+        "completion": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": bedrock_stream_message,
+        },
+    },
     "anthropic.bedrock": {
         "chat": {
             "function_name": "anthropic.messages.create",
@@ -114,6 +128,7 @@ MAP_PROVIDER_TO_FUNCTION = {
     "openai": openai_request,
     "openai.azure": azure_openai_request,
     "vertexai": vertexai_request,
+    "amazon.bedrock": amazon_bedrock_request,
     "anthropic.bedrock": anthropic_bedrock_request,
 }
 
@@ -168,6 +183,16 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": agoogle_stream_completion,
         },
     },
+    "amazon.bedrock": {
+        "chat": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": abedrock_stream_message,
+        },
+        "completion": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": abedrock_stream_message,
+        },
+    },
     "anthropic.bedrock": {
         "chat": {
             "function_name": "anthropic.messages.create",
@@ -188,6 +213,7 @@ AMAP_PROVIDER_TO_FUNCTION = {
     "openai": aopenai_request,
     "openai.azure": aazure_openai_request,
     "vertexai": avertexai_request,
+    "amazon.bedrock": aamazon_bedrock_request,
     "anthropic.bedrock": aanthropic_bedrock_request,
 }
 
{promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/streaming/__init__.py
@@ -13,6 +13,7 @@ from .blueprint_builder import (
 from .response_handlers import (
     aanthropic_stream_completion,
     aanthropic_stream_message,
+    abedrock_stream_message,
     agoogle_stream_chat,
     agoogle_stream_completion,
     amistral_stream_chat,
@@ -20,6 +21,7 @@ from .response_handlers import (
     anthropic_stream_message,
     aopenai_stream_chat,
     aopenai_stream_completion,
+    bedrock_stream_message,
     google_stream_chat,
     google_stream_completion,
     mistral_stream_chat,
@@ -45,6 +47,8 @@ __all__ = [
     "aopenai_stream_completion",
     "anthropic_stream_completion",
     "aanthropic_stream_completion",
+    "bedrock_stream_message",
+    "abedrock_stream_message",
     "google_stream_chat",
     "google_stream_completion",
     "agoogle_stream_chat",
{promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/streaming/blueprint_builder.py
@@ -137,3 +137,38 @@ def build_prompt_blueprint_from_google_event(event, metadata):
 
     assistant_message = _build_assistant_message(assistant_content, tool_calls or None, template_format="f-string")
     return _build_prompt_blueprint(assistant_message, metadata)
+
+
+def build_prompt_blueprint_from_bedrock_event(result, metadata):
+    """
+    Build a prompt blueprint from an Amazon Bedrock streaming event.
+    """
+    assistant_content = []
+    tool_calls = []
+
+    if "contentBlockDelta" in result:
+        delta = result["contentBlockDelta"].get("delta", {})
+
+        if "reasoningContent" in delta:
+            reasoning_text = delta["reasoningContent"].get("text", "")
+            signature = delta["reasoningContent"].get("signature")
+            assistant_content.append(_create_content_item("thinking", thinking=reasoning_text, signature=signature))
+
+        elif "text" in delta:
+            assistant_content.append(_create_content_item("text", text=delta["text"]))
+
+        elif "toolUse" in delta:
+            tool_use = delta["toolUse"]
+            assistant_content.append(
+                _create_tool_call(tool_use.get("toolUseId", ""), tool_use.get("name", ""), tool_use.get("input", ""))
+            )
+
+    elif "contentBlockStart" in result:
+        start_block = result["contentBlockStart"].get("start", {})
+
+        if "toolUse" in start_block:
+            tool_use = start_block["toolUse"]
+            tool_calls.append(_create_tool_call(tool_use.get("toolUseId", ""), tool_use.get("name", ""), ""))
+
+    assistant_message = _build_assistant_message(assistant_content, tool_calls or None)
+    return _build_prompt_blueprint(assistant_message, metadata)
{promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/streaming/response_handlers.py
@@ -5,6 +5,7 @@ This module contains handlers that process streaming responses from various
 LLM providers and return both the final response and prompt blueprint.
 """
 
+import json
 from typing import Any, AsyncIterable, List
 
 
@@ -548,3 +549,147 @@ async def amistral_stream_chat(generator: AsyncIterable[Any]) -> Any:
     response.choices[0].message.content = content
     response.choices[0].message.tool_calls = tool_calls
     return response
+
+
+def bedrock_stream_message(results: list):
+    """Process Amazon Bedrock streaming message results and return response + blueprint"""
+
+    response = {"ResponseMetadata": {}, "output": {"message": {}}, "stopReason": "end_turn", "metrics": {}, "usage": {}}
+
+    content_blocks = []
+    current_tool_call = None
+    current_tool_input = ""
+    current_text = ""
+    current_signature = ""
+    current_thinking = ""
+
+    for event in results:
+        if "contentBlockStart" in event:
+            content_block = event["contentBlockStart"]
+            if "start" in content_block and "toolUse" in content_block["start"]:
+                tool_use = content_block["start"]["toolUse"]
+                current_tool_call = {"toolUse": {"toolUseId": tool_use["toolUseId"], "name": tool_use["name"]}}
+                current_tool_input = ""
+
+        elif "contentBlockDelta" in event:
+            delta = event["contentBlockDelta"]["delta"]
+            if "text" in delta:
+                current_text += delta["text"]
+            elif "reasoningContent" in delta:
+                reasoning_content = delta["reasoningContent"]
+                if "text" in reasoning_content:
+                    current_thinking += reasoning_content["text"]
+                elif "signature" in reasoning_content:
+                    current_signature += reasoning_content["signature"]
+            elif "toolUse" in delta:
+                if "input" in delta["toolUse"]:
+                    input_chunk = delta["toolUse"]["input"]
+                    current_tool_input += input_chunk
+                    if not input_chunk.strip():
+                        continue
+
+        elif "contentBlockStop" in event:
+            if current_tool_call and current_tool_input:
+                try:
+                    current_tool_call["toolUse"]["input"] = json.loads(current_tool_input)
+                except json.JSONDecodeError:
+                    current_tool_call["toolUse"]["input"] = {}
+                content_blocks.append(current_tool_call)
+                current_tool_call = None
+                current_tool_input = ""
+            elif current_text:
+                content_blocks.append({"text": current_text})
+                current_text = ""
+            elif current_thinking and current_signature:
+                content_blocks.append(
+                    {
+                        "reasoningContent": {
+                            "reasoningText": {"text": current_thinking, "signature": current_signature},
+                        }
+                    }
+                )
+                current_thinking = ""
+                current_signature = ""
+
+        elif "messageStop" in event:
+            response["stopReason"] = event["messageStop"]["stopReason"]
+
+        elif "metadata" in event:
+            metadata = event["metadata"]
+            response["usage"] = metadata.get("usage", {})
+            response["metrics"] = metadata.get("metrics", {})
+
+    response["output"]["message"] = {"role": "assistant", "content": content_blocks}
+    return response
+
+
+async def abedrock_stream_message(generator: AsyncIterable[Any]) -> Any:
+    """Async version of bedrock_stream_message"""
+
+    response = {"ResponseMetadata": {}, "output": {"message": {}}, "stopReason": "end_turn", "metrics": {}, "usage": {}}
+
+    content_blocks = []
+    current_tool_call = None
+    current_tool_input = ""
+    current_text = ""
+    current_signature = ""
+    current_thinking = ""
+
+    async for event in generator:
+        if "contentBlockStart" in event:
+            content_block = event["contentBlockStart"]
+            if "start" in content_block and "toolUse" in content_block["start"]:
+                tool_use = content_block["start"]["toolUse"]
+                current_tool_call = {"toolUse": {"toolUseId": tool_use["toolUseId"], "name": tool_use["name"]}}
+                current_tool_input = ""
+
+        elif "contentBlockDelta" in event:
+            delta = event["contentBlockDelta"]["delta"]
+            if "text" in delta:
+                current_text += delta["text"]
+            elif "reasoningContent" in delta:
+                reasoning_content = delta["reasoningContent"]
+                if "text" in reasoning_content:
+                    current_thinking += reasoning_content["text"]
+                elif "signature" in reasoning_content:
+                    current_signature += reasoning_content["signature"]
+            elif "toolUse" in delta:
+                if "input" in delta["toolUse"]:
+                    input_chunk = delta["toolUse"]["input"]
+                    current_tool_input += input_chunk
+                    if not input_chunk.strip():
+                        continue
+
+        elif "contentBlockStop" in event:
+            if current_tool_call and current_tool_input:
+                try:
+                    current_tool_call["toolUse"]["input"] = json.loads(current_tool_input)
+                except json.JSONDecodeError:
+                    current_tool_call["toolUse"]["input"] = {}
+                content_blocks.append(current_tool_call)
+                current_tool_call = None
+                current_tool_input = ""
+            elif current_text:
+                content_blocks.append({"text": current_text})
+                current_text = ""
+            elif current_thinking and current_signature:
+                content_blocks.append(
+                    {
+                        "reasoningContent": {
+                            "reasoningText": {"text": current_thinking, "signature": current_signature},
+                        }
+                    }
+                )
+                current_thinking = ""
+                current_signature = ""
+
+        elif "messageStop" in event:
+            response["stopReason"] = event["messageStop"]["stopReason"]
+
+        elif "metadata" in event:
+            metadata = event["metadata"]
+            response["usage"] = metadata.get("usage", {})
+            response["metrics"] = metadata.get("metrics", {})
+
+    response["output"]["message"] = {"role": "assistant", "content": content_blocks}
+    return response
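A hedged usage sketch for the new synchronous handler: a buffered list of ConverseStream events is folded into a single Converse-style response dict. The event payloads below are illustrative, not taken from the diff.

from promptlayer.streaming import bedrock_stream_message

events = [
    {"contentBlockDelta": {"delta": {"text": "Hello, "}}},
    {"contentBlockDelta": {"delta": {"text": "world!"}}},
    {"contentBlockStop": {"contentBlockIndex": 0}},
    {"messageStop": {"stopReason": "end_turn"}},
    {"metadata": {"usage": {"inputTokens": 12, "outputTokens": 4}, "metrics": {"latencyMs": 350}}},
]

response = bedrock_stream_message(events)
# response["output"]["message"] -> {"role": "assistant", "content": [{"text": "Hello, world!"}]}
# response["stopReason"] -> "end_turn"; usage and metrics come from the metadata event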
{promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/streaming/stream_processor.py
@@ -2,6 +2,7 @@ from typing import Any, AsyncGenerator, AsyncIterable, Callable, Dict, Generator
 
 from .blueprint_builder import (
     build_prompt_blueprint_from_anthropic_event,
+    build_prompt_blueprint_from_bedrock_event,
     build_prompt_blueprint_from_google_event,
     build_prompt_blueprint_from_openai_chunk,
 )
@@ -18,12 +19,15 @@ def _build_stream_blueprint(result: Any, metadata: Dict) -> Any:
     elif provider == "google" or (provider == "vertexai" and model_name.startswith("gemini")):
         return build_prompt_blueprint_from_google_event(result, metadata)
 
-    elif provider in ["anthropic", "anthropic.bedrock"] or (provider == "vertexai" and model_name.startswith("claude")):
+    elif provider in ("anthropic", "anthropic.bedrock") or (provider == "vertexai" and model_name.startswith("claude")):
         return build_prompt_blueprint_from_anthropic_event(result, metadata)
 
     elif provider == "mistral":
         return build_prompt_blueprint_from_openai_chunk(result.data, metadata)
 
+    elif provider == "amazon.bedrock":
+        return build_prompt_blueprint_from_bedrock_event(result, metadata)
+
     return None
 
 
@@ -37,6 +41,11 @@ def _build_stream_data(result: Any, stream_blueprint: Any, request_id: Any = None):
 
 def stream_response(*, generator: Generator, after_stream: Callable, map_results: Callable, metadata: Dict):
     results = []
+    provider = metadata.get("model", {}).get("provider", "")
+    if provider == "amazon.bedrock":
+        response_metadata = generator.get("ResponseMetadata", {})
+        generator = generator.get("stream", generator)
+
     for result in generator:
         results.append(result)
 
@@ -45,7 +54,12 @@ def stream_response(*, generator: Generator, after_stream: Callable, map_results
         yield data
 
     request_response = map_results(results)
-    response = after_stream(request_response=request_response.model_dump(mode="json"))
+    if provider == "amazon.bedrock":
+        request_response["ResponseMetadata"] = response_metadata
+    else:
+        request_response = request_response.model_dump(mode="json")
+
+    response = after_stream(request_response=request_response)
     data["request_id"] = response.get("request_id")
     data["prompt_blueprint"] = response.get("prompt_blueprint")
     yield data
@@ -58,6 +72,10 @@ async def astream_response(
     metadata: Dict[str, Any] = None,
 ) -> AsyncGenerator[Dict[str, Any], None]:
     results = []
+    provider = metadata.get("model", {}).get("provider", "")
+    if provider == "amazon.bedrock":
+        response_metadata = generator.get("ResponseMetadata", {})
+        generator = generator.get("stream", generator)
 
     async for result in generator:
         results.append(result)
@@ -71,7 +89,13 @@ async def astream_response(
         yield item
 
     request_response = await map_results(async_generator_from_list(results))
-    after_stream_response = await after_stream(request_response=request_response.model_dump(mode="json"))
+
+    if provider == "amazon.bedrock":
+        request_response["ResponseMetadata"] = response_metadata
+    else:
+        request_response = request_response.model_dump(mode="json")
+
+    after_stream_response = await after_stream(request_response=request_response)
     data["request_id"] = after_stream_response.get("request_id")
     data["prompt_blueprint"] = after_stream_response.get("prompt_blueprint")
     yield data
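The unwrap added above reflects the shape boto3 returns for ConverseStream: a dict carrying ResponseMetadata plus a "stream" EventStream, rather than a bare iterator. A minimal illustration of that shape and of the unwrap (the values are placeholders, not taken from the diff):

streaming_response = {
    "ResponseMetadata": {"RequestId": "abc-123", "HTTPStatusCode": 200},
    "stream": iter([{"messageStart": {"role": "assistant"}}]),  # an EventStream in real responses
}

response_metadata = streaming_response.get("ResponseMetadata", {})
events = streaming_response.get("stream", streaming_response)
for event in events:
    pass  # buffered and replayed through the stream handlers above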
{promptlayer-1.0.64 → promptlayer-1.0.66}/promptlayer/utils.py
@@ -1455,7 +1455,14 @@ MAP_TYPE_TO_GOOGLE_FUNCTION = {
 def google_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
     from google import genai
 
-    client = genai.Client()
+    if os.environ.get("GOOGLE_GENAI_USE_VERTEXAI") == "true":
+        client = genai.Client(
+            vertexai=True,
+            project=os.environ.get("GOOGLE_CLOUD_PROJECT"),
+            location=os.environ.get("GOOGLE_CLOUD_LOCATION"),
+        )
+    else:
+        client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY") or os.environ.get("GOOGLE_API_KEY"))
     request_to_make = MAP_TYPE_TO_GOOGLE_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
     return request_to_make(client, **function_kwargs)
 
@@ -1493,7 +1500,14 @@ AMAP_TYPE_TO_GOOGLE_FUNCTION = {
 async def agoogle_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
     from google import genai
 
-    client = genai.Client()
+    if os.environ.get("GOOGLE_GENAI_USE_VERTEXAI") == "true":
+        client = genai.Client(
+            vertexai=True,
+            project=os.environ.get("GOOGLE_CLOUD_PROJECT"),
+            location=os.environ.get("GOOGLE_CLOUD_LOCATION"),
+        )
+    else:
+        client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY") or os.environ.get("GOOGLE_API_KEY"))
     request_to_make = AMAP_TYPE_TO_GOOGLE_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
     return await request_to_make(client, **function_kwargs)
 
@@ -1544,6 +1558,51 @@ async def avertexai_request(prompt_blueprint: GetPromptTemplateResponse, client_
     )
 
 
+def amazon_bedrock_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
+    import boto3
+
+    bedrock_client = boto3.client(
+        "bedrock-runtime",
+        aws_access_key_id=function_kwargs.pop("aws_access_key", None),
+        aws_secret_access_key=function_kwargs.pop("aws_secret_key", None),
+        region_name=function_kwargs.pop("aws_region", "us-east-1"),
+    )
+
+    stream = function_kwargs.pop("stream", False)
+
+    if stream:
+        return bedrock_client.converse_stream(**function_kwargs)
+    else:
+        return bedrock_client.converse(**function_kwargs)
+
+
+async def aamazon_bedrock_request(
+    prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict
+):
+    import aioboto3
+
+    aws_access_key = function_kwargs.pop("aws_access_key", None)
+    aws_secret_key = function_kwargs.pop("aws_secret_key", None)
+    aws_region = function_kwargs.pop("aws_region", "us-east-1")
+
+    session_kwargs = {}
+    if aws_access_key:
+        session_kwargs["aws_access_key_id"] = aws_access_key
+    if aws_secret_key:
+        session_kwargs["aws_secret_access_key"] = aws_secret_key
+    if aws_region:
+        session_kwargs["region_name"] = aws_region
+
+    stream = function_kwargs.pop("stream", False)
+    session = aioboto3.Session()
+
+    async with session.client("bedrock-runtime", **session_kwargs) as client:
+        if stream:
+            return await client.converse_stream(**function_kwargs)
+        else:
+            return await client.converse(**function_kwargs)
+
+
 def anthropic_bedrock_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
     from anthropic import AnthropicBedrock
 
{promptlayer-1.0.64 → promptlayer-1.0.66}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "promptlayer"
-version = "1.0.64"
+version = "1.0.66"
 description = "PromptLayer is a platform for prompt engineering and tracks your LLM requests."
 authors = ["Magniv <hello@magniv.io>"]
 license = "Apache-2.0"
@@ -29,6 +29,8 @@ pytest-network = "^0.0.1"
 pytest-parametrize-cases = "^0.1.2"
 pydantic = "^2.11.7"
 pydantic-settings = "^2.10.1"
+boto3 = "^1.35.0"
+aioboto3 = "^13.0.0"
 
 [build-system]
 requires = ["poetry-core"]